| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , ):
super().__init__()
self.register_modules(transformer=_UpperCAmelCase , vae=_UpperCAmelCase , scheduler=_UpperCAmelCase )
# create a imagenet -> id dictionary for easier use
__a : Optional[int] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''',''' ):
__a : Optional[int] = int(_UpperCAmelCase )
__a : Optional[Any] = dict(sorted(self.labels.items() ) )
def _lowerCamelCase ( self , _UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = list(_UpperCAmelCase )
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = 4.0 , _UpperCAmelCase = None , _UpperCAmelCase = 50 , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , ):
__a : List[Any] = len(_UpperCAmelCase )
__a : str = self.transformer.config.sample_size
__a : int = self.transformer.config.in_channels
__a : Union[str, Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_UpperCAmelCase , device=self.device , dtype=self.transformer.dtype , )
__a : Dict = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__a : Any = torch.tensor(_UpperCAmelCase , device=self.device ).reshape(-1 )
__a : Optional[Any] = torch.tensor([1000] * batch_size , device=self.device )
__a : List[str] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__a : Optional[int] = latent_model_input[: len(_UpperCAmelCase ) // 2]
__a : int = torch.cat([half, half] , dim=0 )
__a : int = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
__a : Tuple = t
if not torch.is_tensor(_UpperCAmelCase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__a : Tuple = latent_model_input.device.type == '''mps'''
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = torch.floataa if is_mps else torch.floataa
else:
__a : List[Any] = torch.intaa if is_mps else torch.intaa
__a : Optional[Any] = torch.tensor([timesteps] , dtype=_UpperCAmelCase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__a : Union[str, Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__a : Tuple = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__a : List[Any] = self.transformer(
_UpperCAmelCase , timestep=_UpperCAmelCase , class_labels=_UpperCAmelCase ).sample
# perform guidance
if guidance_scale > 1:
__a , __a : Dict = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__a , __a : Dict = torch.split(_UpperCAmelCase , len(_UpperCAmelCase ) // 2 , dim=0 )
__a : int = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__a : Tuple = torch.cat([half_eps, half_eps] , dim=0 )
__a : Optional[Any] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__a , __a : int = torch.split(_UpperCAmelCase , _UpperCAmelCase , dim=1 )
else:
__a : Optional[Any] = noise_pred
# compute previous image: x_t -> x_t-1
__a : Tuple = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
if guidance_scale > 1:
__a , __a : str = latent_model_input.chunk(2 , dim=0 )
else:
__a : int = latent_model_input
__a : List[str] = 1 / self.vae.config.scaling_factor * latents
__a : Dict = self.vae.decode(_UpperCAmelCase ).sample
__a : Tuple = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a : int = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__a : int = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_UpperCAmelCase )
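
# A minimal usage sketch for the pipeline above (hedged: assumes the public
# "facebook/DiT-XL-2-256" checkpoint and a CUDA device are available; the label
# words are illustrative, not taken from this file):
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#   pipe = pipe.to("cuda")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   generator = torch.manual_seed(33)
#   images = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator).images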
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = False
__lowerCAmelCase = 3.0
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_UpperCAmelCase ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.2_5 ).to_kwargs() , {'''a''': 2, '''c''': 2.2_5} )
@require_cuda
def _lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
__a : List[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
__a : int = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__a : Optional[Any] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _UpperCAmelCase )
@require_multi_gpu
def _lowerCamelCase ( self ):
__a : Dict = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
A = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
A = Accelerator(kwargs_handlers=[ddp_scaler])
A = torch.nn.Linear(100, 200)
A = accelerator.prepare(model)
# Check the values changed in kwargs
A = ''''''
A = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=128 , _UpperCAmelCase=32 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
__a : Optional[Any] = parent
__a : Optional[int] = batch_size
__a : Tuple = seq_length
__a : List[Any] = is_training
__a : Optional[int] = use_input_mask
__a : int = use_token_type_ids
__a : Optional[int] = use_labels
__a : Any = vocab_size
__a : Union[str, Any] = hidden_size
__a : List[Any] = num_hidden_layers
__a : Optional[Any] = num_attention_heads
__a : List[str] = intermediate_size
__a : Dict = hidden_act
__a : List[str] = hidden_dropout_prob
__a : Dict = attention_probs_dropout_prob
__a : Any = max_position_embeddings
__a : Any = type_vocab_size
__a : Union[str, Any] = type_sequence_label_size
__a : Optional[Any] = initializer_range
__a : List[str] = num_labels
__a : Tuple = num_choices
__a : str = scope
def _lowerCamelCase ( self ):
__a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : List[str] = None
if self.use_input_mask:
__a : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__a : List[str] = None
if self.use_token_type_ids:
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : str = None
__a : List[str] = None
__a : Union[str, Any] = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : str = ids_tensor([self.batch_size] , self.num_choices )
__a : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self ):
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self ):
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : Any = self.prepare_config_and_inputs()
__a : str = True
__a : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Any = NezhaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__a : str = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
__a : int = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a : Dict = True
__a : str = NezhaModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : List[Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__a : Union[str, Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , )
__a : Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[str] = NezhaForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[Any] = NezhaForNextSentencePrediction(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Tuple = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[Any] = NezhaForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : str = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Dict = NezhaForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Dict = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Dict = self.num_labels
__a : Any = NezhaForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Optional[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Dict = self.num_labels
__a : Tuple = NezhaForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = self.num_choices
__a : Optional[int] = NezhaForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : List[str] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self ):
__a : str = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : Optional[int] = config_and_inputs
__a : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = True
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
__a : Dict = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
__a : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
__a : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def _lowerCamelCase ( self ):
__a : List[Any] = NezhaModelTester(self )
__a : Optional[int] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
# This regression test was failing with PyTorch < 1.3
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : int = self.model_tester.prepare_config_and_inputs_for_decoder()
__a : List[Any] = None
self.model_tester.create_and_check_model_as_decoder(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
def _lowerCamelCase ( self ):
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Tuple = NezhaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@slow
@require_torch_gpu
def _lowerCamelCase ( self ):
__a , __a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__a : str = True
__a : str = model_class(config=_UpperCAmelCase )
__a : List[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__a : int = torch.jit.trace(
_UpperCAmelCase , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''bert.pt''' ) )
__a : int = torch.jit.load(os.path.join(_UpperCAmelCase , '''bert.pt''' ) , map_location=_UpperCAmelCase )
loaded(inputs_dict['''input_ids'''].to(_UpperCAmelCase ) , inputs_dict['''attention_mask'''].to(_UpperCAmelCase ) )
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self ):
__a : str = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
__a : Dict = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__a : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__a : Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__a : Optional[int] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
__a : Optional[int] = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def _lowerCamelCase ( self ):
__a : Optional[int] = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
__a : Optional[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__a : Union[str, Any] = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__a : Optional[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__a : List[Any] = torch.Size((1, 6, 21128) )
self.assertEqual(output.shape , _UpperCAmelCase )
__a : List[Any] = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
def __A ( a_ :int) -> int:
if not isinstance(a_ , a_):
raise TypeError('''only integers accepted as input''')
else:
__a : Optional[Any] = str(abs(a_))
__a : List[Any] = [list(a_) for char in range(len(a_))]
for index in range(len(a_)):
num_transpositions[index].pop(a_)
return max(
int(''''''.join(list(a_))) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('''doctest''').testmod()
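
# Worked examples for remove_digit (checked by hand, shown for clarity):
#   remove_digit(152)  -> 52   (candidates after deleting one digit: 52, 12, 15)
#   remove_digit(6385) -> 685  (candidates: 385, 685, 635, 638)
# Negative inputs are handled via abs(), so remove_digit(-11) == 1.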
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
A = get_logger(__name__)
A = Path(__file__).parent / '''model_card_template.md'''
A = uuida().hex
A = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __A ( a_ :Union[Dict, str, None] = None) -> str:
__a : Union[str, Any] = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''').upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(a_ , a_):
ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items())
elif isinstance(a_ , a_):
ua += "; " + user_agent
return ua
def __A ( a_ :str , a_ :Optional[str] = None , a_ :Optional[str] = None) -> Optional[int]:
if token is None:
__a : Any = HfFolder.get_token()
if organization is None:
__a : List[Any] = whoami(a_)['''name''']
return F"""{username}/{model_id}"""
else:
return F"""{organization}/{model_id}"""
def __A ( a_ :Union[str, Any] , a_ :List[str]) -> Optional[Any]:
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''')
if hasattr(a_ , '''local_rank''') and args.local_rank not in [-1, 0]:
return
__a : int = args.hub_token if hasattr(a_ , '''hub_token''') else None
__a : Any = get_full_repo_name(a_ , token=a_)
__a : Tuple = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=a_ , model_name=a_ , repo_name=a_ , dataset_name=args.dataset_name if hasattr(a_ , '''dataset_name''') else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(a_ , '''gradient_accumulation_steps''') else None
) , adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta1''') else None , adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta2''') else None , adam_weight_decay=args.adam_weight_decay if hasattr(a_ , '''adam_weight_decay''') else None , adam_epsilon=args.adam_epsilon if hasattr(a_ , '''adam_epsilon''') else None , lr_scheduler=args.lr_scheduler if hasattr(a_ , '''lr_scheduler''') else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(a_ , '''lr_warmup_steps''') else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(a_ , '''ema_inv_gamma''') else None , ema_power=args.ema_power if hasattr(a_ , '''ema_power''') else None , ema_max_decay=args.ema_max_decay if hasattr(a_ , '''ema_max_decay''') else None , mixed_precision=args.mixed_precision , )
__a : List[Any] = os.path.join(args.output_dir , '''README.md''')
model_card.save(a_)
def __A ( a_ :Optional[str] , a_ :Optional[str] = None) -> Union[str, Any]:
if resolved_file is None or commit_hash is not None:
return commit_hash
__a : Any = str(Path(a_).as_posix())
__a : Optional[int] = re.search(R'''snapshots/([^/]+)/''' , a_)
if search is None:
return None
__a : Dict = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(a_) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
A = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
A = os.path.join(hf_cache_home, '''diffusers''')
def __A ( a_ :Optional[str] = None , a_ :Optional[str] = None) -> None:
if new_cache_dir is None:
__a : Dict = DIFFUSERS_CACHE
if old_cache_dir is None:
__a : List[Any] = old_diffusers_cache
__a : Union[str, Any] = Path(a_).expanduser()
__a : Dict = Path(a_).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*'''):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
__a : List[Any] = new_cache_dir / old_blob_path.relative_to(a_)
new_blob_path.parent.mkdir(parents=a_ , exist_ok=a_)
os.replace(a_ , a_)
try:
os.symlink(a_ , a_)
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
A = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
A = 0
else:
with open(cache_version_file) as f:
try:
A = int(f.read())
except ValueError:
A = 0
if cache_version < 1:
A = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
A = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def __A ( a_ :str , a_ :Optional[str] = None) -> str:
if variant is not None:
__a : Dict = weights_name.split('''.''')
__a : List[Any] = splits[:-1] + [variant] + splits[-1:]
__a : Tuple = '''.'''.join(a_)
return weights_name
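
# Behavior of _add_variant above, for clarity: the variant is spliced in before the final
# extension, e.g. _add_variant("diffusion_pytorch_model.bin", "fp16") returns
# "diffusion_pytorch_model.fp16.bin", while variant=None leaves the name unchanged.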
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash)
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash)
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_text_model'''
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = vocab_size
__a : Optional[int] = hidden_size
__a : Dict = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : Optional[int] = hidden_act
__a : List[Any] = intermediate_size
__a : List[Any] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : List[str] = type_vocab_size
__a : Tuple = initializer_range
__a : Dict = layer_norm_eps
__a : Any = position_embedding_type
__a : Dict = use_cache
__a : Dict = pad_token_id
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : List[str] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Dict = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_vision_model'''
def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 600 , _UpperCAmelCase = 2.0 , _UpperCAmelCase = 3.1 , _UpperCAmelCase = 8 , _UpperCAmelCase = [3, 3, 5, 3, 5, 5, 3] , _UpperCAmelCase = [32, 16, 24, 40, 80, 112, 192] , _UpperCAmelCase = [16, 24, 40, 80, 112, 192, 320] , _UpperCAmelCase = [] , _UpperCAmelCase = [1, 2, 2, 2, 1, 2, 1] , _UpperCAmelCase = [1, 2, 2, 3, 3, 4, 1] , _UpperCAmelCase = [1, 6, 6, 6, 6, 6, 6] , _UpperCAmelCase = 0.2_5 , _UpperCAmelCase = "swish" , _UpperCAmelCase = 2560 , _UpperCAmelCase = "mean" , _UpperCAmelCase = 0.0_2 , _UpperCAmelCase = 0.0_0_1 , _UpperCAmelCase = 0.9_9 , _UpperCAmelCase = 0.2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : Tuple = num_channels
__a : str = image_size
__a : List[Any] = width_coefficient
__a : Optional[int] = depth_coefficient
__a : Union[str, Any] = depth_divisor
__a : int = kernel_sizes
__a : Dict = in_channels
__a : List[str] = out_channels
__a : Any = depthwise_padding
__a : str = strides
__a : Optional[Any] = num_block_repeats
__a : Optional[Any] = expand_ratios
__a : Any = squeeze_expansion_ratio
__a : int = hidden_act
__a : Union[str, Any] = hidden_dim
__a : Union[str, Any] = pooling_type
__a : Tuple = initializer_range
__a : List[str] = batch_norm_eps
__a : List[Any] = batch_norm_momentum
__a : Union[str, Any] = drop_connect_rate
__a : List[Any] = sum(_UpperCAmelCase ) * 4
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : Optional[Any] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align'''
__lowerCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=640 , _UpperCAmelCase=1.0 , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
if text_config is None:
__a : Dict = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
__a : Any = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
__a : Any = AlignTextConfig(**_UpperCAmelCase )
__a : Any = AlignVisionConfig(**_UpperCAmelCase )
__a : Optional[int] = projection_dim
__a : Union[str, Any] = temperature_init_value
__a : int = initializer_range
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = copy.deepcopy(self.__dict__ )
__a : Tuple = self.text_config.to_dict()
__a : Union[str, Any] = self.vision_config.to_dict()
__a : int = self.__class__.model_type
return output
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A = tuple[int, int]
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : set[int] = vertices
__a : dict[EdgeT, int] = {
(min(_UpperCAmelCase ), max(_UpperCAmelCase )): weight for edge, weight in edges.items()
}
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
__a : Dict = weight
def _lowerCamelCase ( self ):
__a : Graph = Graph({min(self.vertices )} , {} )
__a : EdgeT
__a : int
__a : EdgeT
__a : int
while len(subgraph.vertices ) < len(self.vertices ):
__a : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
__a : List[str] = edge
__a : Optional[int] = weight
subgraph.add_edge(_UpperCAmelCase , _UpperCAmelCase )
return subgraph
def __A ( a_ :str = "p107_network.txt") -> int:
__a : str = os.path.abspath(os.path.dirname(a_))
__a : str = os.path.join(a_ , a_)
__a : dict[EdgeT, int] = {}
__a : list[str]
__a : int
__a : int
with open(a_) as f:
__a : Optional[int] = f.read().strip().split('''\n''')
__a : Dict = [line.split(''',''') for line in data]
for edgea in range(1 , len(a_)):
for edgea in range(a_):
if adjaceny_matrix[edgea][edgea] != "-":
__a : Tuple = int(adjaceny_matrix[edgea][edgea])
__a : Graph = Graph(set(range(len(a_))) , a_)
__a : Graph = graph.prims_algorithm()
__a : int = sum(graph.edges.values())
__a : int = sum(subgraph.edges.values())
return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
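
# Hand-checked toy example of the saving this computes: for a triangle graph with edges
# {(0, 1): 1, (1, 2): 2, (0, 2): 3}, the minimum spanning tree keeps the two cheapest
# edges (weights 1 and 2), so the saving is (1 + 2 + 3) - (1 + 2) = 3.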
"""simple docstring"""
from __future__ import annotations
from random import choice
def __A ( a_ :Tuple) -> List[str]:
return choice(a_)
def __A ( a_ :list[int] , a_ :int) -> int:
__a : Optional[int] = random_pivot(a_)
# partition based on pivot
# linear time
__a : Union[str, Any] = [e for e in lst if e < pivot]
__a : Any = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(a_) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(a_) < k - 1:
return kth_number(a_ , k - len(a_) - 1)
# pivot is in elements smaller than k
else:
return kth_number(a_ , a_)
if __name__ == "__main__":
import doctest
doctest.testmod()
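
# Illustrative calls (quickselect returns the kth smallest element):
#   kth_number([2, 1, 3, 4, 5], 3) == 3
#   kth_number([2, 1, 3, 4, 5], 1) == 1
# Expected O(n) average time; worst case O(n^2) with unlucky pivot choices.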
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
A = parse(importlib.metadata.version('''torch'''))
def __A ( a_ :Union[str, Version] , a_ :str , a_ :str) -> Dict:
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}""")
__a : int = STR_OPERATION_TO_FUNC[operation]
if isinstance(a_ , a_):
__a : List[str] = parse(importlib.metadata.version(a_))
return operation(a_ , parse(a_))
def __A ( a_ :str , a_ :str) -> int:
return compare_versions(a_ , a_ , a_)
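
# Illustrative usage (assuming STR_OPERATION_TO_FUNC maps comparison strings such as
# ">=" to functions from the `operator` module, as in accelerate):
#   is_torch_version(">=", "1.12.0")         # True on torch 1.12 or newer
#   compare_versions("numpy", "<", "2.0.0")  # compares the installed numpy version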
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
A = logging.getLogger(__name__)
def __A ( a_ :Union[str, Any] , a_ :Dict) -> Union[str, Any]:
__a : Optional[int] = np.argmax(a_ , axis=1)
return np.sum(outputs == labels)
def __A ( a_ :Any) -> str:
with open(a_ , encoding='''utf_8''') as f:
__a : List[Any] = csv.reader(a_)
__a : List[str] = []
next(a_) # skip the first line
for line in tqdm(a_):
output.append((''' '''.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
return output
def __A ( a_ :Dict , a_ :str , a_ :str , a_ :List[Any] , a_ :Tuple , a_ :List[Any]) -> Any:
__a : List[str] = []
for dataset in encoded_datasets:
__a : List[str] = len(a_)
__a : List[str] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa)
__a : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa)
__a : Tuple = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa)
__a : Optional[Any] = np.zeros((n_batch,) , dtype=np.intaa)
for (
i,
(story, conta, conta, mc_label),
) in enumerate(a_):
__a : str = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a : Tuple = with_conta
__a : int = with_conta
__a : List[str] = len(a_) - 1
__a : int = len(a_) - 1
__a : Optional[int] = with_conta
__a : Tuple = with_conta
__a : List[Any] = mc_label
__a : Any = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(a_) for t in all_inputs))
return tensor_datasets
def __A ( ) -> Union[str, Any]:
__a : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=a_ , default='''openai-gpt''' , help='''pretrained model name''')
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''')
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''')
parser.add_argument(
'''--output_dir''' , default=a_ , type=a_ , required=a_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=a_ , default='''''')
parser.add_argument('''--eval_dataset''' , type=a_ , default='''''')
parser.add_argument('''--seed''' , type=a_ , default=42)
parser.add_argument('''--num_train_epochs''' , type=a_ , default=3)
parser.add_argument('''--train_batch_size''' , type=a_ , default=8)
parser.add_argument('''--eval_batch_size''' , type=a_ , default=16)
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=a_ , help='''Epsilon for Adam optimizer.''')
parser.add_argument('''--max_grad_norm''' , type=a_ , default=1)
parser.add_argument(
'''--max_steps''' , default=-1 , type=a_ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=a_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=a_ , default=6.25e-5)
parser.add_argument('''--warmup_steps''' , default=0 , type=a_ , help='''Linear warmup over warmup_steps.''')
parser.add_argument('''--lr_schedule''' , type=a_ , default='''warmup_linear''')
parser.add_argument('''--weight_decay''' , type=a_ , default=0.0_1)
parser.add_argument('''--lm_coef''' , type=a_ , default=0.9)
parser.add_argument('''--n_valid''' , type=a_ , default=3_74)
parser.add_argument('''--server_ip''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
parser.add_argument('''--server_port''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
__a : str = parser.parse_args()
print(a_)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=a_)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
__a : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
__a : str = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(a_ , a_))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__a : List[str] = ['''_start_''', '''_delimiter_''', '''_classify_''']
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name)
tokenizer.add_tokens(a_)
__a : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_)
__a : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
model.resize_token_embeddings(len(a_))
model.to(a_)
# Load and encode the datasets
def tokenize_and_encode(a_ :List[Any]):
if isinstance(a_ , a_):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(a_))
elif isinstance(a_ , a_):
return obj
return [tokenize_and_encode(a_) for o in obj]
logger.info('''Encoding dataset...''')
__a : Dict = load_rocstories_dataset(args.train_dataset)
__a : int = load_rocstories_dataset(args.eval_dataset)
__a : Optional[int] = (train_dataset, eval_dataset)
__a : List[Any] = tokenize_and_encode(a_)
# Compute the max input length for the Transformer
__a : List[Any] = model.config.n_positions // 2 - 2
__a : int = max(
len(story[:max_length]) + max(len(conta[:max_length]) , len(conta[:max_length])) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset)
__a : Union[str, Any] = min(a_ , model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__a : Tuple = pre_process_datasets(a_ , a_ , a_ , *a_)
__a , __a : Tuple = tensor_datasets[0], tensor_datasets[1]
__a : List[str] = TensorDataset(*a_)
__a : Optional[Any] = RandomSampler(a_)
__a : str = DataLoader(a_ , sampler=a_ , batch_size=args.train_batch_size)
__a : List[str] = TensorDataset(*a_)
__a : Optional[int] = SequentialSampler(a_)
__a : Optional[Any] = DataLoader(a_ , sampler=a_ , batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__a : int = args.max_steps
__a : Optional[int] = args.max_steps // (len(a_) // args.gradient_accumulation_steps) + 1
else:
__a : str = len(a_) // args.gradient_accumulation_steps * args.num_train_epochs
__a : List[Any] = list(model.named_parameters())
__a : Optional[int] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__a : List[str] = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], '''weight_decay''': 0.0},
]
__a : int = AdamW(a_ , lr=args.learning_rate , eps=args.adam_epsilon)
__a : Union[str, Any] = get_linear_schedule_with_warmup(
a_ , num_warmup_steps=args.warmup_steps , num_training_steps=a_)
if args.do_train:
__a , __a , __a : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs) , desc='''Epoch'''):
__a : Dict = 0
__a : Dict = 0
__a : List[str] = tqdm(a_ , desc='''Training''')
for step, batch in enumerate(a_):
__a : Dict = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : str = batch
__a : List[Any] = model(a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : Optional[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                __a : Tuple = '''Training loss: {:.2e} lr: {:.2e}'''.format(exp_average_loss , scheduler.get_lr()[0])
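                # The running loss above is an exponential moving average, e.g. with
                # exp_average_loss = 2.0 and a new batch loss of 1.0 it becomes
                # 0.7 * 2.0 + 0.3 * 1.0 = 1.7.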
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__a : Dict = model.module if hasattr(a_ , '''module''') else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__a : int = os.path.join(args.output_dir , a_)
__a : str = os.path.join(args.output_dir , a_)
torch.save(model_to_save.state_dict() , a_)
model_to_save.config.to_json_file(a_)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
__a : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
model.to(a_)
if args.do_eval:
model.eval()
__a , __a : List[Any] = 0, 0
__a , __a : Union[str, Any] = 0, 0
for batch in tqdm(a_ , desc='''Evaluating'''):
__a : str = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : List[Any] = batch
with torch.no_grad():
__a , __a , __a , __a : str = model(
a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : List[str] = mc_logits.detach().cpu().numpy()
__a : Optional[Any] = mc_labels.to('''cpu''').numpy()
__a : str = accuracy(a_ , a_)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
__a : Tuple = eval_loss / nb_eval_steps
__a : List[str] = eval_accuracy / nb_eval_examples
__a : List[Any] = tr_loss / nb_tr_steps if args.do_train else None
__a : List[str] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__a : Dict = os.path.join(args.output_dir , '''eval_results.txt''')
with open(a_ , '''w''') as writer:
logger.info('''***** Eval results *****''')
for key in sorted(result.keys()):
logger.info(''' %s = %s''' , a_ , str(result[key]))
writer.write('''%s = %s\n''' % (key, str(result[key])))
if __name__ == "__main__":
main()
| 52 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=[10, 20, 30, 40] , _UpperCAmelCase=[2, 2, 3, 2] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=10 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=["stage2", "stage3", "stage4"] , _UpperCAmelCase=[2, 3, 4] , _UpperCAmelCase=None , ):
__a : Tuple = parent
__a : Optional[Any] = batch_size
__a : List[Any] = image_size
__a : Optional[int] = num_channels
__a : int = num_stages
__a : List[str] = hidden_sizes
__a : Tuple = depths
__a : Union[str, Any] = is_training
__a : Optional[int] = use_labels
__a : Union[str, Any] = intermediate_size
__a : str = hidden_act
__a : List[str] = num_labels
__a : Union[str, Any] = initializer_range
__a : Dict = out_features
__a : Union[str, Any] = out_indices
__a : Optional[int] = scope
def _lowerCamelCase ( self ):
__a : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Optional[int] = None
if self.use_labels:
__a : Dict = ids_tensor([self.batch_size] , self.num_labels )
__a : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = ConvNextModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Tuple = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : int = ConvNextForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = ConvNextBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : List[str] = model(_UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__a : List[str] = None
__a : Union[str, Any] = ConvNextBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : List[str] = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self ):
__a : List[str] = self.prepare_config_and_inputs()
__a , __a , __a : int = config_and_inputs
__a : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
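# Sanity arithmetic for the shape assertions used in the tests below (assuming
# the usual ConvNext downsampling: 4x after the patchify stem, 32x overall):
# with the tester's image_size of 32, stage-1 feature maps are 32 // 4 = 8 px
# per side and the final hidden state is 32 // 32 = 1 px per side.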
@require_torch
class __lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def _lowerCamelCase ( self ):
__a : Optional[int] = ConvNextModelTester(self )
__a : List[str] = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _lowerCamelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self ):
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Tuple = model_class(_UpperCAmelCase )
__a : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : str = [*signature.parameters.keys()]
__a : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Tuple = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__a : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : int = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Any = ConvNextModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __A ( ) -> Any:
__a : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self ):
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self ):
__a : Optional[Any] = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(_UpperCAmelCase )
__a : Optional[Any] = self.default_image_processor
__a : Optional[Any] = prepare_img()
__a : List[str] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__a : int = model(**_UpperCAmelCase )
# verify the logits
__a : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__a : int = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
@require_torch
class __lowercase ( unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = (ConvNextBackbone,) if is_torch_available() else ()
__lowerCAmelCase = ConvNextConfig
__lowerCAmelCase = False
def _lowerCamelCase ( self ):
__a : Tuple = ConvNextModelTester(self )
| 52 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=4 , ):
__a : Any = parent
__a : Optional[int] = batch_size
__a : str = seq_length
__a : List[str] = is_training
__a : Optional[Any] = use_attention_mask
__a : Optional[Any] = use_token_type_ids
__a : List[str] = use_labels
__a : Union[str, Any] = vocab_size
__a : int = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : Dict = intermediate_size
__a : List[str] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Union[str, Any] = attention_probs_dropout_prob
__a : int = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Optional[int] = type_sequence_label_size
__a : Optional[Any] = initializer_range
__a : Optional[int] = num_choices
def _lowerCamelCase ( self ):
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Union[str, Any] = None
if self.use_attention_mask:
__a : Any = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[int] = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Any = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self ):
__a : Dict = self.prepare_config_and_inputs()
__a , __a , __a , __a : str = config_and_inputs
__a : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self ):
__a : Any = self.prepare_config_and_inputs()
__a , __a , __a , __a : Union[str, Any] = config_and_inputs
__a : Optional[int] = True
__a : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self ):
__a : Dict = FlaxRobertaModelTester(self )
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__a : int = model_class_name.from_pretrained('''roberta-base''' , from_pt=_UpperCAmelCase )
__a : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
| 52 | 1 |
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
A = '''sshleifer/mar_enro_6_3_student'''
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
super().setUp()
__a : Dict = cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_UpperCAmelCase , )
__a : int = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def _lowerCamelCase ( self ):
MarianMTModel.from_pretrained(_UpperCAmelCase )
@slow
@require_torch_gpu
def _lowerCamelCase ( self ):
__a : List[Any] = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
__a : List[Any] = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
__a : Dict = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
__a : Dict = bash_script.replace(_UpperCAmelCase , str(_UpperCAmelCase ) )
__a : Dict = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
__a : int = f"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
__a : Optional[int] = ['''finetune.py'''] + bash_script.split() + args
with patch.object(_UpperCAmelCase , '''argv''' , _UpperCAmelCase ):
__a : str = argparse.ArgumentParser()
__a : int = pl.Trainer.add_argparse_args(_UpperCAmelCase )
__a : Optional[Any] = SummarizationModule.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
__a : Optional[int] = parser.parse_args()
__a : Union[str, Any] = main(_UpperCAmelCase )
# Check metrics
__a : int = load_json(model.metrics_save_path )
__a : Tuple = metrics['''val'''][0]
__a : Union[str, Any] = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , _UpperCAmelCase )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.0_1 )
        # fails if the model hangs on generate (e.g. a bad config was saved)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
__a : Optional[int] = os.listdir(_UpperCAmelCase )
__a : Optional[int] = [x for x in contents if x.endswith('''.ckpt''' )][0]
__a : Tuple = os.path.join(args.output_dir , _UpperCAmelCase )
__a : int = torch.load(_UpperCAmelCase , map_location='''cpu''' )
__a : Optional[Any] = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            __a : Optional[int] = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def _lowerCamelCase ( self ):
__a : Any = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
__a : List[Any] = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
__a : Union[str, Any] = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
__a : str = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
__a : Optional[Any] = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
__a : List[str] = bash_script.replace(_UpperCAmelCase , str(_UpperCAmelCase ) )
__a : str = self.get_auto_remove_tmp_dir()
__a : List[Any] = bash_script.replace('''--fp16''' , '''''' )
__a : str = 6
__a : Dict = (
['''distillation.py''']
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"""--num_train_epochs={epochs}""",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(_UpperCAmelCase , '''argv''' , _UpperCAmelCase ):
__a : str = argparse.ArgumentParser()
__a : Tuple = pl.Trainer.add_argparse_args(_UpperCAmelCase )
__a : str = SummarizationDistiller.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
__a : Any = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
__a : List[Any] = distill_main(_UpperCAmelCase )
# Check metrics
__a : Union[str, Any] = load_json(model.metrics_save_path )
__a : Union[str, Any] = metrics['''val'''][0]
__a : Tuple = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # fails if the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # fails if the model hangs on generate (e.g. a bad config was saved)
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , _UpperCAmelCase )
# check lightning ckpt can be loaded and has a reasonable statedict
__a : Optional[Any] = os.listdir(_UpperCAmelCase )
__a : Dict = [x for x in contents if x.endswith('''.ckpt''' )][0]
__a : str = os.path.join(args.output_dir , _UpperCAmelCase )
__a : Optional[int] = torch.load(_UpperCAmelCase , map_location='''cpu''' )
__a : int = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            __a : int = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
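# A standalone sketch of the bash-script templating both tests above rely on
# (hypothetical script text and values): placeholders are replaced by plain
# string substitution before the argv list is rebuilt.
_script = "finetune.py --max_len $MAX_LEN --bs $BS"
for _key, _value in {"$MAX_LEN": 64, "$BS": 64}.items():
    _script = _script.replace(_key, str(_value))
assert _script == "finetune.py --max_len 64 --bs 64"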
| 52 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''levit'''
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=16 , _UpperCAmelCase=[128, 256, 384] , _UpperCAmelCase=[4, 8, 12] , _UpperCAmelCase=[4, 4, 4] , _UpperCAmelCase=[16, 16, 16] , _UpperCAmelCase=0 , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = image_size
__a : List[Any] = num_channels
__a : Dict = kernel_size
__a : Optional[int] = stride
__a : Optional[int] = padding
__a : Dict = hidden_sizes
__a : int = num_attention_heads
__a : Optional[int] = depths
__a : str = key_dim
__a : Union[str, Any] = drop_path_rate
__a : Optional[Any] = patch_size
__a : Tuple = attention_ratio
__a : int = mlp_ratio
__a : int = initializer_range
__a : int = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
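        # With the defaults above: key_dim[0] = 16, so the two Subsample stages
        # are parameterized with 128 // 16 = 8 and 256 // 16 = 16 (in the usual
        # LeViT layout these are the subsample attention's head counts).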
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowerCamelCase ( self ):
return 1e-4
| 52 | 1 |
"""simple docstring"""
def jaccard_similarity(set_a , set_b , alternative_union=False):
    if isinstance(set_a , set) and isinstance(set_b , set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a , (list, tuple)) and isinstance(set_b , (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {'''a''', '''b''', '''c''', '''d''', '''e'''}
    set_b = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
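    # With alternative_union=True the denominator is len(set_a) + len(set_b)
    # instead of the size of the union: the intersection {c, d, e} has size 3,
    # so the call above prints 3 / 8 = 0.375 and this one prints 3 / 11.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))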
| 52 | 1 |
"""simple docstring"""
def count_inversions_bf(arr) -> int:
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1 , n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    p_sorted, inversion_p = count_inversions_recursive(p)
    q_sorted, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(p_sorted , q_sorted)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p , q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
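# Worked example for _count_cross_inversions: with p = [3, 5] and q = [1, 4]
# (both sorted), the merge takes 1 (two elements of p remain -> 2 inversions),
# then 3, then 4 (one element of p remains -> 1 more), then 5. It therefore
# returns ([1, 3, 4, 5], 3), matching the cross pairs (3, 1), (5, 1), (5, 4).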
def main():
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print('''number of inversions = ''' , num_inversions_bf)
    # testing an array with zero inversions (a sorted arr_a)
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , num_inversions_bf)
    # an empty list should also have zero inversions
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print('''number of inversions = ''' , num_inversions_bf)
if __name__ == "__main__":
main()
| 52 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class __lowercase :
'''simple docstring'''
    def __init__( self , vertices , edges ):
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge( self , edge , weight ):
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm( self ):
        subgraph: Graph = Graph({min(self.vertices )} , {} )
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def __A ( a_ :str = "p107_network.txt") -> int:
__a : str = os.path.abspath(os.path.dirname(a_))
__a : str = os.path.join(a_ , a_)
__a : dict[EdgeT, int] = {}
__a : list[str]
__a : int
__a : int
with open(a_) as f:
__a : Optional[int] = f.read().strip().split('''\n''')
__a : Dict = [line.split(''',''') for line in data]
for edgea in range(1 , len(a_)):
for edgea in range(a_):
if adjaceny_matrix[edgea][edgea] != "-":
__a : Tuple = int(adjaceny_matrix[edgea][edgea])
__a : Graph = Graph(set(range(len(a_))) , a_)
__a : Graph = graph.prims_algorithm()
__a : int = sum(graph.edges.values())
__a : int = sum(subgraph.edges.values())
return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
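    # A toy check of prims_algorithm itself (assumed example, not Euler data):
    # triangle 0-1 (w=1), 1-2 (w=2), 0-2 (w=3); the MST keeps 0-1 and 1-2.
    example_graph = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    assert sum(example_graph.prims_algorithm().edges.values()) == 3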
| 52 | 1 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    if not isinstance(number , int):
        raise TypeError('''Parameter number must be int''')
    if number < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''')
    # Convert the number to a string to iterate over its digits, summing each digit's factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
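# Worked examples of the digit-factorial map (well-known values, as a check):
# digit_factorial_sum(145) = 1! + 4! + 5! = 1 + 24 + 120 = 145 (a fixed point),
# digit_factorial_sum(169) = 1! + 6! + 9! = 1 + 720 + 362880 = 363601.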
def solution(chain_length: int = 60 , number_limit: int = 1_00_00_00) -> int:
    if not isinstance(chain_length , int) or not isinstance(number_limit , int):
        raise TypeError('''Parameters chain_length and number_limit must be int''')
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0''')
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1 , number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''trocr'''
__lowerCAmelCase = ['''past_key_values''']
__lowerCAmelCase = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self , _UpperCAmelCase=50265 , _UpperCAmelCase=1024 , _UpperCAmelCase=12 , _UpperCAmelCase=16 , _UpperCAmelCase=4096 , _UpperCAmelCase="gelu" , _UpperCAmelCase=512 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , **_UpperCAmelCase , ):
__a : List[str] = vocab_size
__a : Optional[Any] = d_model
__a : Optional[Any] = decoder_layers
__a : Union[str, Any] = decoder_attention_heads
__a : int = decoder_ffn_dim
__a : List[Any] = activation_function
__a : Any = max_position_embeddings
__a : Dict = dropout
__a : List[Any] = attention_dropout
__a : Optional[Any] = activation_dropout
__a : str = init_std
__a : List[str] = decoder_layerdrop
__a : Union[str, Any] = use_cache
__a : Optional[Any] = scale_embedding
__a : List[Any] = use_learned_position_embeddings
__a : Optional[int] = layernorm_embedding
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
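# A standalone sketch of what the attribute_map above enables (assumed
# semantics, much simplified from the real PretrainedConfig): reading the
# mapped name is redirected to the stored attribute.
class _AttrMapSketch:
    attribute_map = {'''hidden_size''': '''d_model'''}

    def __init__(self, d_model=1024):
        self.d_model = d_model

    def __getattr__(self, name):
        # Only called when normal lookup fails, so mapped names land here.
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

assert _AttrMapSketch().hidden_size == 1024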
| 52 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['''GLPNFeatureExtractor''']
A = ['''GLPNImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input , model , tokenizer , topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('''<mask>''') == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk , dim=0)
    topk_predicted_token_bpe = ''' '''.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''')):
        predicted_token = predicted_token_bpe.replace('''\u2581''' , ''' ''')
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(''' {0}'''.format(masked_token) , predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained('''camembert-base''')
model = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
masked_input = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
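# Each returned entry is a (filled_sentence, probability, predicted_token)
# triple, so topk=3 prints the three most likely fills; for this prompt the
# commonly reported top answer is '''délicieux''', though exact outputs depend
# on the checkpoint.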
| 52 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = False
__lowerCAmelCase = 3.0
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_UpperCAmelCase ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.2_5 ).to_kwargs() , {'''a''': 2, '''c''': 2.2_5} )
@require_cuda
def _lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
__a : List[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
__a : int = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__a : Optional[Any] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _UpperCAmelCase )
@require_multi_gpu
def _lowerCamelCase ( self ):
__a : Dict = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
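# A standalone sketch of the `to_kwargs` behaviour asserted above (assumed
# semantics: return only the fields that differ from the dataclass defaults):
from dataclasses import dataclass, fields

@dataclass
class _KwargsSketch:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        # Compare each field to its declared default and keep the changed ones.
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != f.default
        }

assert _KwargsSketch().to_kwargs() == {}
assert _KwargsSketch(a=2, b=True).to_kwargs() == {'''a''': 2, '''b''': True}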
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''''''
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 52 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Optional[int] = [10, 20, 30, 40, 50, 60]
__a : Union[str, Any] = [2, 4, 6, 8, 10, 12]
__a : List[str] = 100
self.assertEqual(kp.calc_profit(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , 210 )
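        # Sanity arithmetic: every item fits (total weight 2+4+6+8+10+12 = 42 <= 100),
        # so the greedy profit is the full sum 10+20+30+40+50+60 = 210.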
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''Weight can not be negative.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''Profit can not be negative.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(_UpperCAmelCase , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
self.assertRaisesRegex(
_UpperCAmelCase , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 52 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
A = None
A = logging.get_logger(__name__)
A = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
A = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
A = {
'''google/rembert''': 256,
}
A = '''▁'''
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = RemBertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , **_UpperCAmelCase , ):
# Mask token behave like a normal word, i.e. include the space before it
__a : List[str] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , )
__a : Tuple = do_lower_case
__a : Tuple = remove_space
__a : Tuple = keep_accents
__a : str = vocab_file
__a : Optional[Any] = False if not self.vocab_file else True
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : Optional[Any] = [self.sep_token_id]
__a : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : List[str] = [self.sep_token_id]
__a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_UpperCAmelCase ) )
return
__a : int = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,)
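# Worked example of the three helpers above (toy word ids [5, 6] and [7, 8, 9]):
# build_inputs_with_special_tokens     -> [CLS] 5 6 [SEP] 7 8 9 [SEP]
# get_special_tokens_mask              -> [1, 0, 0, 1, 0, 0, 0, 1]
# create_token_type_ids_from_sequences -> [0, 0, 0, 0, 1, 1, 1, 1]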
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''llama'''
__lowerCAmelCase = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=32000 , _UpperCAmelCase=4096 , _UpperCAmelCase=11008 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase="silu" , _UpperCAmelCase=2048 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , **_UpperCAmelCase , ):
__a : Dict = vocab_size
__a : Union[str, Any] = max_position_embeddings
__a : str = hidden_size
__a : List[str] = intermediate_size
__a : Any = num_hidden_layers
__a : int = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__a : Union[str, Any] = num_attention_heads
__a : Optional[int] = num_key_value_heads
__a : Dict = hidden_act
__a : Union[str, Any] = initializer_range
__a : int = rms_norm_eps
__a : Optional[int] = pretraining_tp
__a : Optional[Any] = use_cache
__a : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase , )
def _lowerCamelCase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _UpperCAmelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                f"""got {self.rope_scaling}""" )
__a : Tuple = self.rope_scaling.get('''type''' , _UpperCAmelCase )
__a : Optional[int] = self.rope_scaling.get('''factor''' , _UpperCAmelCase )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 52 | 1 |
"""simple docstring"""
import os
import sys
import unittest
A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A = os.path.join(git_repo_path, '''src''', '''transformers''')
A = '''
{0} = None
'''
A = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
A = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Optional[Any] = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' )
self.assertIsNone(_UpperCAmelCase )
__a : Optional[int] = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(_UpperCAmelCase , '''tokenizers''' )
__a : List[Any] = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(_UpperCAmelCase , '''tensorflow_text''' )
__a : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(_UpperCAmelCase , '''sentencepiece_and_tokenizers''' )
__a : str = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(_UpperCAmelCase , '''sentencepiece_and_tensorflow_text''' )
__a : Union[str, Any] = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(_UpperCAmelCase , '''sentencepiece_and_tokenizers_and_vision''' )
def _lowerCamelCase ( self ):
__a : str = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , _UpperCAmelCase )
self.assertIn('''tensorflow_text''' , _UpperCAmelCase )
self.assertIn('''sentencepiece_and_tokenizers''' , _UpperCAmelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def _lowerCamelCase ( self ):
__a : str = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(_UpperCAmelCase , '''\nCONSTANT = None\n''' )
__a : str = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
_UpperCAmelCase , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
__a : int = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__a : List[str] = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__a : Any = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , _UpperCAmelCase )
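# A standalone sketch of the backend parsing exercised above (hypothetical
# simplification of `find_backend`): pull the is_xxx_available names out of an
# `if not ...:` guard line and join multiple backends with "_and_".
import re

def _find_backend_sketch(line):
    if "if not is_" not in line and "if not (is_" not in line:
        return None
    backends = re.findall(r"is_([a-z_]+)_available\(\)", line)
    return "_and_".join(backends) if backends else None

assert _find_backend_sketch("    if not is_tokenizers_available():") == "tokenizers"
assert _find_backend_sketch(
    "    if not (is_sentencepiece_available() and is_tokenizers_available()):"
) == "sentencepiece_and_tokenizers"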
| 52 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__a : int = parent
__a : str = batch_size
__a : List[Any] = num_channels
__a : Union[str, Any] = image_size
__a : List[Any] = min_resolution
__a : str = max_resolution
__a : List[str] = do_resize
__a : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 20}
__a : str = do_thumbnail
__a : str = do_align_axis
__a : Dict = do_pad
__a : Union[str, Any] = do_normalize
__a : List[str] = image_mean
__a : Optional[int] = image_std
def _lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : Tuple = DonutImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__a : int = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def _lowerCamelCase ( self ):
pass
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : int = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : str = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__a : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : List[str] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 52 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_squeezebert_fast'''] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_squeezebert'''] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
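    # With the lazy module installed, importing this package stays cheap: heavy
    # submodules such as the torch-backed modeling file are only imported on the
    # first attribute access (e.g. `squeezebert.SqueezeBertModel`).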
| 52 |
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including: int = nums[0]
    max_excluding: int = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
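# Illustrative, hand-checked examples (values chosen for this note, not from the file):
#   maximum_non_adjacent_sum([1, 2, 3]) == 4               # take 1 and 3
#   maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18  # take 5, 7 and 6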
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : str = params
__a : Optional[Any] = np.array(_UpperCAmelCase )
__a : Tuple = np.array([len(_UpperCAmelCase ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , _UpperCAmelCase ):
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
return len(self.lengths )
def _lowerCamelCase ( self ):
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.params.max_model_input_size
__a : List[str] = self.lengths > max_len
logger.info(f"""Splitting {sum(_UpperCAmelCase )} too long sequences.""" )
        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0 , len(l) , n )]
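        # e.g. divide_chunks(list(range(5)), 2) -> [[0, 1], [2, 3], [4]]  (illustrative)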
__a : Any = []
__a : Tuple = []
if self.params.mlm:
__a , __a : Tuple = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
else:
__a , __a : Any = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__a : List[str] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__a : Tuple = np.insert(_UpperCAmelCase , 0 , _UpperCAmelCase )
if sub_s[-1] != sep_id:
__a : Any = np.insert(_UpperCAmelCase , len(_UpperCAmelCase ) , _UpperCAmelCase )
assert len(_UpperCAmelCase ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(_UpperCAmelCase )
new_tok_ids.extend(_UpperCAmelCase )
new_lengths.extend([len(_UpperCAmelCase ) for l in sub_seqs] )
__a : Optional[Any] = np.array(_UpperCAmelCase )
__a : List[Any] = np.array(_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = len(self )
__a : List[Any] = self.lengths > 11
__a : int = self.token_ids[indices]
__a : Tuple = self.lengths[indices]
__a : Optional[Any] = len(self )
logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
def _lowerCamelCase ( self ):
if "unk_token" not in self.params.special_tok_ids:
return
else:
__a : int = self.params.special_tok_ids['''unk_token''']
__a : Union[str, Any] = len(self )
__a : Optional[int] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__a : str = (unk_occs / self.lengths) < 0.5
__a : Tuple = self.token_ids[indices]
__a : List[Any] = self.lengths[indices]
__a : Tuple = len(self )
logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
def _lowerCamelCase ( self ):
if not self.params.is_master:
return
logger.info(f"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : List[Any] = [t[0] for t in batch]
__a : Union[str, Any] = [t[1] for t in batch]
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
# Max for paddings
__a : List[str] = max(_UpperCAmelCase )
# Pad token ids
if self.params.mlm:
__a : Optional[int] = self.params.special_tok_ids['''pad_token''']
else:
__a : Dict = self.params.special_tok_ids['''unk_token''']
__a : List[str] = [list(t.astype(_UpperCAmelCase ) ) + [pad_idx] * (max_seq_len_ - len(_UpperCAmelCase )) for t in token_ids]
assert len(tk_ ) == len(_UpperCAmelCase )
assert all(len(_UpperCAmelCase ) == max_seq_len_ for t in tk_ )
__a : Union[str, Any] = torch.tensor(tk_ ) # (bs, max_seq_len_)
__a : Any = torch.tensor(_UpperCAmelCase ) # (bs)
return tk_t, lg_t
| 52 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = BigBirdTokenizer
__lowerCAmelCase = BigBirdTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
def _lowerCamelCase ( self ):
super().setUp()
__a : Dict = self.tokenizer_class(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self ):
__a : List[str] = '''<s>'''
__a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_UpperCAmelCase ) , 1004 )
def _lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__a : Dict = self.get_tokenizer()
__a : Any = self.get_rust_tokenizer()
__a : int = '''I was born in 92000, and this is falsé.'''
__a : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
__a : List[str] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__a : Any = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Tuple = self.get_rust_tokenizer()
__a : Tuple = tokenizer.encode(_UpperCAmelCase )
__a : List[Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = BigBirdTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
__a : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
__a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a : Optional[Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def _lowerCamelCase ( self ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def _lowerCamelCase ( self ):
__a : str = '''Hello World!'''
__a : str = [65, 18536, 2260, 101, 66]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def _lowerCamelCase ( self ):
__a : Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
__a : Optional[Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@require_torch
@slow
def _lowerCamelCase ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__a : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__a : List[str] = ''' '''.join(_UpperCAmelCase )
__a : Tuple = self.big_tokenizer.encode_plus(_UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Any = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Optional[Any] = BigBirdConfig(attention_type='''original_full''' )
__a : Tuple = BigBirdModel(_UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCAmelCase )
model(**_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
__a : Union[str, Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
__a : List[Any] = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def _lowerCamelCase ( self ):
# fmt: off
__a : Optional[Any] = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 52 | 1 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
A = '''sshleifer/bart-tiny-random'''
A = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self ):
return AutoConfig.from_pretrained(_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , *__a : Union[str, Any] = create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowerCamelCase ( self ):
__a , *__a : Optional[Any] = create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , *__a : Optional[int] = create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=_UpperCAmelCase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowerCamelCase ( self ):
__a , *__a : List[str] = create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowerCamelCase ( self ):
with self.assertRaises(_UpperCAmelCase ):
create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=_UpperCAmelCase , d=_UpperCAmelCase )
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''convnextv2'''
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=224 , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : List[str] = num_channels
__a : str = patch_size
__a : Dict = num_stages
__a : List[str] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
__a : List[str] = [3, 3, 9, 3] if depths is None else depths
__a : List[Any] = hidden_act
__a : Any = initializer_range
__a : Optional[int] = layer_norm_eps
__a : List[Any] = drop_path_rate
__a : Any = image_size
__a : str = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__a , __a : Optional[int] = get_aligned_output_features_output_indices(
out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )
| 52 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_informer'''] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = (DDPMScheduler,)
def _lowerCamelCase ( self , **_UpperCAmelCase ):
__a : int = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_UpperCAmelCase )
return config
def _lowerCamelCase ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def _lowerCamelCase ( self ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def _lowerCamelCase ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Dict = scheduler_class(**_UpperCAmelCase )
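        # _get_variance implements the DDPM posterior variance
        # beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) (clamped under "fixed_small"),
        # which is 0 at t=0 and approaches beta_end = 0.02 at t=999 on a linear schedule.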
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def _lowerCamelCase ( self ):
__a : int = self.scheduler_classes[0]
__a : int = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**_UpperCAmelCase )
__a : int = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[Any] = self.dummy_sample_deter
__a : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Optional[int] = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : List[Any] = pred_prev_sample
__a : int = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = self.scheduler_classes[0]
__a : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a : int = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[str] = self.dummy_sample_deter
__a : str = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Dict = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : Optional[int] = pred_prev_sample
__a : Optional[int] = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Any = self.get_scheduler_config()
__a : str = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
__a : List[Any] = scheduler.timesteps
for i, timestep in enumerate(_UpperCAmelCase ):
if i == len(_UpperCAmelCase ) - 1:
__a : Union[str, Any] = -1
else:
__a : str = timesteps[i + 1]
__a : Dict = scheduler.previous_timestep(_UpperCAmelCase )
__a : str = prev_t.item()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Optional[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(_UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
__a : Optional[int] = len(_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : List[str] = scheduler_class(**_UpperCAmelCase )
__a : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _UpperCAmelCase , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
| 52 | 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50_003
PYTHON_CODE = 50_002
@require_sentencepiece
@require_tokenizers
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = PLBartTokenizer
__lowerCAmelCase = None
__lowerCAmelCase = False
def _lowerCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__a : Tuple = PLBartTokenizer(_UpperCAmelCase , language_codes='''base''' , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self ):
__a : List[str] = PLBartTokenizer(_UpperCAmelCase , language_codes='''base''' , keep_accents=_UpperCAmelCase )
__a : Dict = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a : List[str] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a : Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__a : Dict = tokenizer.vocab_size
__a : Tuple = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 4 , _UpperCAmelCase )]
self.assertListEqual(_UpperCAmelCase , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
__a : Union[str, Any] = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__a : Optional[int] = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
def _lowerCamelCase ( self ):
__a : str = PLBartTokenizer(_UpperCAmelCase , language_codes='''multi''' , keep_accents=_UpperCAmelCase )
__a : str = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a : str = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a : Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__a : List[Any] = tokenizer.vocab_size
__a : Optional[int] = [tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) for x in range(end - 7 , _UpperCAmelCase )]
self.assertListEqual(
_UpperCAmelCase , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
__a : Tuple = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__a : Any = tokenizer(_UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) , _UpperCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = '''uclanlp/plbart-python-en_XX'''
__lowerCAmelCase = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
__lowerCAmelCase = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
__lowerCAmelCase = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def _lowerCamelCase ( cls ):
__a : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
__a : Union[str, Any] = 1
return cls
def _lowerCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50003 )
def _lowerCamelCase ( self ):
__a : Tuple = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
def _lowerCamelCase ( self ):
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
__a : Union[str, Any] = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
__a : Tuple = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
__a : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : str = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , _UpperCAmelCase )
__a : List[Any] = 10
__a : Optional[Any] = self.tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
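        # With truncation, the suffix tokens survive: index -2 is EOS (id 2) and index -1
        # is the source language code that PLBart appends after EOS.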
def _lowerCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [50004, 50001] )
def _lowerCamelCase ( self ):
__a : Optional[int] = tempfile.mkdtemp()
__a : Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCAmelCase )
__a : Tuple = PLBartTokenizer.from_pretrained(_UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCAmelCase )
@require_torch
def _lowerCamelCase ( self ):
__a : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , return_tensors='''pt''' )
__a : List[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _UpperCAmelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def _lowerCamelCase ( self ):
__a : Any = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__a : Union[str, Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
__a : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _lowerCamelCase ( self ):
__a : Any = self.tokenizer(self.src_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=3 , return_tensors='''pt''' )
__a : Optional[int] = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10 , return_tensors='''pt''' )
__a : Optional[int] = targets['''input_ids''']
__a : Tuple = shift_tokens_right(_UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _lowerCamelCase ( self ):
__a : Dict = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 50003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 50001,
} , )
| 52 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=400 , _UpperCAmelCase=2000 , _UpperCAmelCase=2048 , _UpperCAmelCase=128 , _UpperCAmelCase=1 , _UpperCAmelCase=512 , _UpperCAmelCase=30 , _UpperCAmelCase=44100 , ):
__a : Any = parent
__a : Tuple = batch_size
__a : Tuple = min_seq_length
__a : List[str] = max_seq_length
__a : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a : Tuple = spectrogram_length
__a : int = feature_size
__a : int = num_audio_channels
__a : Tuple = hop_length
__a : List[Any] = chunk_length
__a : Any = sampling_rate
def _lowerCamelCase ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ):
def _flatten(_UpperCAmelCase ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
__a : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a : Optional[Any] = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = TvltFeatureExtractor
def _lowerCamelCase ( self ):
__a : Optional[Any] = TvltFeatureExtractionTester(self )
def _lowerCamelCase ( self ):
__a : int = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''hop_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : List[str] = feat_extract_first.save_pretrained(_UpperCAmelCase )[0]
check_json_file_has_correct_format(_UpperCAmelCase )
__a : Union[str, Any] = self.feature_extraction_class.from_pretrained(_UpperCAmelCase )
__a : Tuple = feat_extract_first.to_dict()
__a : List[Any] = feat_extract_second.to_dict()
__a : int = dict_first.pop('''mel_filters''' )
__a : List[Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : int = os.path.join(_UpperCAmelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(_UpperCAmelCase )
__a : Optional[Any] = self.feature_extraction_class.from_json_file(_UpperCAmelCase )
__a : Optional[Any] = feat_extract_first.to_dict()
__a : Any = feat_extract_second.to_dict()
__a : Optional[Any] = dict_first.pop('''mel_filters''' )
__a : Dict = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
# Initialize feature_extractor
__a : str = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__a : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__a : int = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__a : List[Any] = feature_extractor(
_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 , mask_audio=_UpperCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__a : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a : Any = np.asarray(_UpperCAmelCase )
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : int = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__a : int = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self ):
__a : List[str] = self._load_datasamples(1 )
__a : Tuple = TvltFeatureExtractor()
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__a : Dict = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _UpperCAmelCase , atol=1e-4 ) )
| 52 | 1 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
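# Illustrative, hand-worked sketch (synthetic values, not from the original file):
# a single bright pixel dilated with the cross-shaped element below grows into a plus.
#   img = np.zeros((5, 5)); img[2, 2] = 255
#   dilation(gray_to_binary(img), np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
#   -> True at (2, 2) and its four direct neighbours, False elsewhere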
if __name__ == "__main__":
# read original image
    lena_path = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('''RGB''')
pil_img.save('''result_dilation.png''')
| 52 |
"""simple docstring"""
from __future__ import annotations
class __lowercase :
'''simple docstring'''
    def __init__( self , text , pattern ):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)
    def match_in_pattern( self , char ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
    def mismatch_in_text( self , current_pos ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
    def bad_character_heuristic( self ):
# searches pattern in text and returns index positions
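        # Bad-character rule: on a mismatch at text position j, shift the pattern so its
        # rightmost occurrence of text[j] lines up with j; if text[j] does not occur in
        # the pattern at all, the search window jumps past j entirely.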
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
return positions
text = '''ABAABA'''
pattern = '''AB'''
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 52 | 1 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def solution(n: int = 10) -> int:
    return sum(
        int(''''''.join(map(str , num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
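# Hand-checked instance (not from the original file): 1406357289 qualifies because
# 406 % 2 == 0, 63 % 3 == 0, 635 % 5 == 0, 357 % 7 == 0, 572 % 11 == 0,
# 728 % 13 == 0 and 289 % 17 == 0; summing all such pandigitals gives 16695334890.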
if __name__ == "__main__":
print(F'{solution() = }')
| 52 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
    url = F'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
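    # NOTE: 'yuRUbf' and 'kCrYT' are Google-internal CSS class names that change without
    # notice; treat this result-link extraction as a fragile best-effort sketch.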
webbrowser.open(link)
| 52 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
'''simple docstring'''
def _lowerCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 16 , _UpperCAmelCase = True , _UpperCAmelCase=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__a : str = '''cuda'''
else:
__a : str = '''cuda''' if torch.cuda.is_available() else '''cpu'''
__a : Any = AutoModelForCausalLM.from_pretrained(_UpperCAmelCase )
__a : Union[str, Any] = model.to(_UpperCAmelCase )
__a : str = AutoTokenizer.from_pretrained(_UpperCAmelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__a : Optional[int] = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_UpperCAmelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__a : List[str] = model.config.max_length - 1
else:
__a : Optional[int] = model.config.max_length
__a : int = tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='''pt''' , return_attention_mask=_UpperCAmelCase , ).to(_UpperCAmelCase )
__a : List[Any] = encodings['''input_ids''']
__a : Tuple = encodings['''attention_mask''']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__a : Any = []
__a : int = CrossEntropyLoss(reduction='''none''' )
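        # Per text, the loop below computes PPL = exp(mean cross-entropy of the target
        # tokens); the shifted attention mask keeps padding out of both the summed loss
        # and the token count.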
for start_index in logging.tqdm(range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase ) ):
__a : List[Any] = min(start_index + batch_size , len(_UpperCAmelCase ) )
__a : Optional[int] = encoded_texts[start_index:end_index]
__a : Dict = attn_masks[start_index:end_index]
if add_start_token:
__a : Dict = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_UpperCAmelCase )
__a : List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__a : Tuple = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(_UpperCAmelCase ), attn_mask] , dim=1 )
__a : List[str] = encoded_batch
with torch.no_grad():
__a : Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).logits
__a : Optional[int] = out_logits[..., :-1, :].contiguous()
__a : Tuple = labels[..., 1:].contiguous()
__a : Tuple = attn_mask[..., 1:].contiguous()
            __a : int = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , _UpperCAmelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_UpperCAmelCase )}
| 52 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = False
__lowerCAmelCase = 3.0
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_UpperCAmelCase ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.2_5 ).to_kwargs() , {'''a''': 2, '''c''': 2.2_5} )
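        # MockClass defaults are a=0, b=False, c=3.0, so only the overridden fields
        # survive the round-trip through `to_kwargs` above.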
@require_cuda
def _lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
__a : List[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
__a : int = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__a : Optional[Any] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _UpperCAmelCase )
@require_multi_gpu
def _lowerCamelCase ( self ):
__a : Dict = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''''''
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
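# A minimal sketch of the pattern exercised above (names from accelerate's public API):
# a KwargsHandler dataclass reports only the fields that deviate from their defaults,
# and Accelerator forwards them to the matching torch object.
#
#   handler = GradScalerKwargs(init_scale=512)   # only init_scale deviates
#   handler.to_kwargs()                          # -> {"init_scale": 512}
#   accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[handler])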
| 52 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 52 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
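# With the lazy module installed in sys.modules, importing the package stays cheap and a
# heavy submodule is only materialized on first attribute access, e.g. (a sketch):
#
#   from transformers import TapasConfig        # config only, no torch import
#   from transformers import TapasForMaskedLM   # triggers modeling_tapas (requires torch)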
| 52 | 1 |
"""simple docstring"""
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
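# An alternative sketch (an assumption, not part of the original file): NumPy broadcasting
# computes the same negative without Python loops for a uint8 image:
#
#   negative = 255 - img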
| 52 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[], ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name, dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ), adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None, adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None, adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None, adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None, lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None, ema_power=args.ema_power if hasattr(args, "ema_power") else None, ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None, mixed_precision=args.mixed_precision, )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
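# e.g. _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"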
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.")
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''')
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""")
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""")
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""")
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''')
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""")
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
A = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
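# A small variant sketch (an assumption, not part of the original file): swapping the
# list-based queue for collections.deque makes each dequeue O(1) instead of O(n):
#
#   from collections import deque
#   queue = deque([self.source_vertex])
#   vertex = queue.popleft()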
| 52 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 52 | 1 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 52 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot within the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest number of a list of distinct integers."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
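    # A tiny usage sketch: among the distinct values below, the 3rd smallest is 3.
    assert kth_number([3, 1, 4, 5, 9, 2, 6], 3) == 3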
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[Any]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
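    # Reference (comment form): sherman_morrison above computes
    #   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
    # and numerator_factor == 0 is the singular case 1 + v^T A^(-1) u = 0.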
| 52 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
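# Shape reference for the tensors built above (n_batch examples, 2 continuations each):
#   input_ids    -> (n_batch, 2, input_len)
#   mc_token_ids -> (n_batch, 2), index of the classification token per choice
#   lm_labels    -> (n_batch, 2, input_len), -100 marks positions ignored by the LM loss
#   mc_labels    -> (n_batch,)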
def __A ( ) -> Union[str, Any]:
__a : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=a_ , default='''openai-gpt''' , help='''pretrained model name''')
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''')
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''')
parser.add_argument(
'''--output_dir''' , default=a_ , type=a_ , required=a_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=a_ , default='''''')
parser.add_argument('''--eval_dataset''' , type=a_ , default='''''')
parser.add_argument('''--seed''' , type=a_ , default=42)
parser.add_argument('''--num_train_epochs''' , type=a_ , default=3)
parser.add_argument('''--train_batch_size''' , type=a_ , default=8)
parser.add_argument('''--eval_batch_size''' , type=a_ , default=16)
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=a_ , help='''Epsilon for Adam optimizer.''')
parser.add_argument('''--max_grad_norm''' , type=a_ , default=1)
parser.add_argument(
'''--max_steps''' , default=-1 , type=a_ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=a_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=a_ , default=6.25e-5)
parser.add_argument('''--warmup_steps''' , default=0 , type=a_ , help='''Linear warmup over warmup_steps.''')
parser.add_argument('''--lr_schedule''' , type=a_ , default='''warmup_linear''')
parser.add_argument('''--weight_decay''' , type=a_ , default=0.0_1)
parser.add_argument('''--lm_coef''' , type=a_ , default=0.9)
parser.add_argument('''--n_valid''' , type=a_ , default=3_74)
parser.add_argument('''--server_ip''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
parser.add_argument('''--server_port''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=a_)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('''Encoding dataset...''')
__a : Dict = load_rocstories_dataset(args.train_dataset)
__a : int = load_rocstories_dataset(args.eval_dataset)
__a : Optional[int] = (train_dataset, eval_dataset)
__a : List[Any] = tokenize_and_encode(a_)
# Compute the max input length for the Transformer
__a : List[Any] = model.config.n_positions // 2 - 2
__a : int = max(
len(story[:max_length]) + max(len(conta[:max_length]) , len(conta[:max_length])) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset)
__a : Union[str, Any] = min(a_ , model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__a : Tuple = pre_process_datasets(a_ , a_ , a_ , *a_)
__a , __a : Tuple = tensor_datasets[0], tensor_datasets[1]
__a : List[str] = TensorDataset(*a_)
__a : Optional[Any] = RandomSampler(a_)
__a : str = DataLoader(a_ , sampler=a_ , batch_size=args.train_batch_size)
__a : List[str] = TensorDataset(*a_)
__a : Optional[int] = SequentialSampler(a_)
__a : Optional[Any] = DataLoader(a_ , sampler=a_ , batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__a : int = args.max_steps
__a : Optional[int] = args.max_steps // (len(a_) // args.gradient_accumulation_steps) + 1
else:
__a : str = len(a_) // args.gradient_accumulation_steps * args.num_train_epochs
__a : List[Any] = list(model.named_parameters())
__a : Optional[int] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__a : List[str] = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], '''weight_decay''': 0.0},
]
__a : int = AdamW(a_ , lr=args.learning_rate , eps=args.adam_epsilon)
__a : Union[str, Any] = get_linear_schedule_with_warmup(
a_ , num_warmup_steps=args.warmup_steps , num_training_steps=a_)
if args.do_train:
__a , __a , __a : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs) , desc='''Epoch'''):
__a : Dict = 0
__a : Dict = 0
__a : List[str] = tqdm(a_ , desc='''Training''')
for step, batch in enumerate(a_):
__a : Dict = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : str = batch
__a : List[Any] = model(a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : Optional[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__a : int = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__a : Tuple = '''Training loss: {:.2e} lr: {:.2e}'''.format(a_ , scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__a : Dict = model.module if hasattr(a_ , '''module''') else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__a : int = os.path.join(args.output_dir , a_)
__a : str = os.path.join(args.output_dir , a_)
torch.save(model_to_save.state_dict() , a_)
model_to_save.config.to_json_file(a_)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
__a : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
model.to(a_)
if args.do_eval:
model.eval()
__a , __a : List[Any] = 0, 0
__a , __a : Union[str, Any] = 0, 0
for batch in tqdm(a_ , desc='''Evaluating'''):
__a : str = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : List[Any] = batch
with torch.no_grad():
__a , __a , __a , __a : str = model(
a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : List[str] = mc_logits.detach().cpu().numpy()
__a : Optional[Any] = mc_labels.to('''cpu''').numpy()
__a : str = accuracy(a_ , a_)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
__a : Tuple = eval_loss / nb_eval_steps
__a : List[str] = eval_accuracy / nb_eval_examples
__a : List[Any] = tr_loss / nb_tr_steps if args.do_train else None
__a : List[str] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__a : Dict = os.path.join(args.output_dir , '''eval_results.txt''')
with open(a_ , '''w''') as writer:
logger.info('''***** Eval results *****''')
for key in sorted(result.keys()):
logger.info(''' %s = %s''' , a_ , str(result[key]))
writer.write('''%s = %s\n''' % (key, str(result[key])))
if __name__ == "__main__":
main()
| 52 | 1 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Params
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
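# Format note: annotations are assumed to be YOLO-style lines
# "class_id x_center y_center width height" with coordinates normalized to [0, 1],
# which is why a horizontal flip is x_center -> 1 - x_center and a vertical flip is
# y_center -> 1 - y_center in `update_image_and_anno`.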
| 52 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 52 | 1 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs, )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset


class SqlDatasetWriter:
    def __init__(self, dataset: Dataset, name: str, con, batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                    written += num_rows

        return written
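# A hedged usage sketch (table and file names are illustrative):
#
#   import sqlite3
#   from datasets import Dataset
#
#   con = sqlite3.connect("data.db")
#   ds = Dataset.from_dict({"text": ["a", "b"]})
#   SqlDatasetWriter(ds, "my_table", con, batch_size=1000).write()
#
# In practice this writer backs the public `Dataset.to_sql("my_table", con)` helper.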
| 52 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
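# A tiny usage sketch: LevitConfig() reproduces the facebook/levit-128S architecture above,
# and LevitOnnxConfig declares a single 4-D "pixel_values" input with named dynamic axes,
# validated against PyTorch outputs to an absolute tolerance of 1e-4.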
| 52 | 1 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 52 |
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Return the Jaccard similarity: intersection size over union size."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
A = {'''a''', '''b''', '''c''', '''d''', '''e'''}
A = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
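    # Hand check: the sets share {'c', 'd', 'e'} (3 elements) and their union
    # has 8 elements, so the script prints 3 / 8 = 0.375.
    assert jaccard_similarity(set_a, set_b) == 3 / 8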
| 52 | 1 |
"""simple docstring"""
from manim import *
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Dict = Rectangle(height=0.5 , width=0.5 )
__a : List[str] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__a : str = [mem.copy() for i in range(6 )]
__a : Dict = [mem.copy() for i in range(6 )]
__a : Dict = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__a : List[Any] = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__a : Optional[Any] = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__a : Any = Text('''CPU''' , font_size=24 )
__a : Optional[Any] = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
__a : List[Any] = [mem.copy() for i in range(1 )]
__a : Optional[Any] = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__a : Optional[Any] = Text('''GPU''' , font_size=24 )
__a : int = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.align_to(_UpperCAmelCase , _UpperCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_UpperCAmelCase )
__a : List[str] = [mem.copy() for i in range(6 )]
__a : List[Any] = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__a : Optional[int] = Text('''Model''' , font_size=24 )
__a : Optional[int] = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_UpperCAmelCase , run_time=1 ) , Create(_UpperCAmelCase , run_time=1 ) , Create(_UpperCAmelCase , run_time=1 ) , )
__a : Optional[int] = MarkupText(
f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
__a : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a : str = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=2.5 ) , Write(_UpperCAmelCase ) , Write(_UpperCAmelCase ) )
self.add(_UpperCAmelCase )
__a : int = []
__a : Optional[int] = []
__a : str = []
for i, rect in enumerate(_UpperCAmelCase ):
__a : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.7 )
cpu_target.move_to(_UpperCAmelCase )
cpu_target.generate_target()
__a : Tuple = 0.4_6 / 4
__a : Union[str, Any] = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=_UpperCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_UpperCAmelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_UpperCAmelCase , buff=0.0 )
cpu_targs.append(_UpperCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_UpperCAmelCase ) )
second_animations.append(MoveToTarget(_UpperCAmelCase , run_time=1.5 ) )
self.play(*_UpperCAmelCase )
self.play(*_UpperCAmelCase )
self.wait()
| 52 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A = tuple[int, int]
class Graph:
    """Weighted undirected graph supporting Prim's minimum-spanning-tree algorithm."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Weight saved by replacing the full network with its minimum spanning tree."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data: list[str] = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge_a in range(1, len(adjacency_matrix)):
        for edge_b in range(edge_a):
            if adjacency_matrix[edge_a][edge_b] != "-":
                edges[(edge_b, edge_a)] = int(adjacency_matrix[edge_a][edge_b])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
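# A minimal sketch exercising Graph and prims_algorithm on a hypothetical
# 3-vertex triangle (not the Project Euler network file): Prim keeps the two
# cheapest edges (0, 1) and (1, 2) and drops (0, 2).
_triangle = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
assert sum(_triangle.prims_algorithm().edges.values()) == 1 + 2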
if __name__ == "__main__":
print(F'{solution() = }')
| 52 | 1 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A = logging.get_logger(__name__)
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = ['''pixel_values''']
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = IMAGENET_DEFAULT_MEAN , _UpperCAmelCase = IMAGENET_DEFAULT_STD , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = size if size is not None else {'''shortest_edge''': 224}
__a : str = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__a : List[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__a : Tuple = get_size_dict(_UpperCAmelCase , param_name='''crop_size''' )
__a : Tuple = do_resize
__a : Optional[int] = size
__a : List[Any] = resample
__a : List[str] = do_center_crop
__a : Dict = crop_size
__a : Union[str, Any] = do_rescale
__a : int = rescale_factor
__a : int = do_normalize
__a : Optional[int] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a : Any = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__a : str = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__a : str = int((256 / 224) * size['''shortest_edge'''] )
__a : Tuple = get_resize_output_image_size(_UpperCAmelCase , size=_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__a : Tuple = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
_UpperCAmelCase , size=(size_dict['''height'''], size_dict['''width''']) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__a : List[Any] = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
__a : str = do_resize if do_resize is not None else self.do_resize
__a : List[str] = resample if resample is not None else self.resample
__a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
__a : Any = do_rescale if do_rescale is not None else self.do_rescale
__a : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
__a : str = do_normalize if do_normalize is not None else self.do_normalize
__a : List[str] = image_mean if image_mean is not None else self.image_mean
__a : List[Any] = image_std if image_std is not None else self.image_std
__a : Optional[Any] = size if size is not None else self.size
__a : Optional[Any] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__a : Optional[int] = crop_size if crop_size is not None else self.crop_size
__a : int = get_size_dict(_UpperCAmelCase , param_name='''crop_size''' )
__a : int = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__a : Union[str, Any] = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__a : Union[str, Any] = [self.resize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for image in images]
if do_center_crop:
__a : str = [self.center_crop(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
if do_rescale:
__a : Optional[int] = [self.rescale(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
if do_normalize:
__a : Union[str, Any] = [self.normalize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for image in images]
__a : Tuple = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__a : Optional[Any] = {'''pixel_values''': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
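# A rough functional sketch of the preprocessing order implemented above
# (resize -> center crop -> rescale -> normalize), using the helpers already
# imported in this module; the input shape and sizes are hypothetical:
#
#     image = np.random.randint(0, 256, (3, 300, 400)).astype(np.uint8)
#     image = resize(image, size=(256, 256), resample=PILImageResampling.BICUBIC)
#     image = center_crop(image, size=(224, 224))
#     image = rescale(image, scale=1 / 255)
#     image = normalize(image, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD)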
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''trocr'''
__lowerCAmelCase = ['''past_key_values''']
__lowerCAmelCase = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self , _UpperCAmelCase=50265 , _UpperCAmelCase=1024 , _UpperCAmelCase=12 , _UpperCAmelCase=16 , _UpperCAmelCase=4096 , _UpperCAmelCase="gelu" , _UpperCAmelCase=512 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , **_UpperCAmelCase , ):
__a : List[str] = vocab_size
__a : Optional[Any] = d_model
__a : Optional[Any] = decoder_layers
__a : Union[str, Any] = decoder_attention_heads
__a : int = decoder_ffn_dim
__a : List[Any] = activation_function
__a : Any = max_position_embeddings
__a : Dict = dropout
__a : List[Any] = attention_dropout
__a : Optional[Any] = activation_dropout
__a : str = init_std
__a : List[str] = decoder_layerdrop
__a : Union[str, Any] = use_cache
__a : Optional[Any] = scale_embedding
__a : List[Any] = use_learned_position_embeddings
__a : Optional[int] = layernorm_embedding
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
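# The attribute_map above aliases generic config names onto decoder-specific
# fields, so `hidden_size` resolves to `d_model`. A hedged sketch with the
# public class (assumed to be exported as TrOCRConfig):
#
#     from transformers import TrOCRConfig
#     cfg = TrOCRConfig(d_model=256, decoder_layers=2)
#     assert cfg.hidden_size == 256  # routed through attribute_map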
| 52 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
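# A self-contained toy illustrating the deferred-import idea behind the
# _LazyModule pattern above (make_lazy_module is a hypothetical helper, not
# the real transformers implementation): attributes resolve on first access.
import importlib
import types


def make_lazy_module(name, import_structure):
    module = types.ModuleType(name)
    # Map each exported attribute to the module that actually defines it.
    lookup = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(attr):  # PEP 562 module-level __getattr__
        submodule = importlib.import_module(lookup[attr])  # imported only now
        return getattr(submodule, attr)

    module.__getattr__ = __getattr__
    return module


lazy_json = make_lazy_module("lazy_json", {"json": ["dumps", "loads"]})
assert lazy_json.dumps({"a": 1}) == '{"a": 1}'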
| 52 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
A = CamembertTokenizer.from_pretrained('''camembert-base''')
A = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
A = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
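# Each tuple returned by fill_mask is (filled_sentence, probability,
# predicted_token); with topk=3 the printed list has three entries, e.g.
# ("Le camembert est excellent :)", 0.49, "excellent") -- values illustrative,
# not recorded model output.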
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=10 , _UpperCAmelCase=[10, 20, 30, 40] , _UpperCAmelCase=[1, 1, 2, 1] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=3 , _UpperCAmelCase=None , ):
__a : int = parent
__a : Dict = batch_size
__a : str = image_size
__a : str = num_channels
__a : Union[str, Any] = embeddings_size
__a : Any = hidden_sizes
__a : Optional[Any] = depths
__a : Any = is_training
__a : List[Any] = use_labels
__a : Tuple = hidden_act
__a : Union[str, Any] = num_labels
__a : str = scope
__a : List[Any] = len(_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : int = None
if self.use_labels:
__a : int = ids_tensor([self.batch_size] , self.num_labels )
__a : Dict = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = TFResNetModel(config=_UpperCAmelCase )
__a : Optional[int] = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Dict = self.num_labels
__a : Any = TFResNetForImageClassification(_UpperCAmelCase )
__a : Optional[int] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self ):
__a : Tuple = self.prepare_config_and_inputs()
__a , __a , __a : Union[str, Any] = config_and_inputs
__a : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__lowerCAmelCase = (
{'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification}
if is_tf_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def _lowerCamelCase ( self ):
__a : Union[str, Any] = TFResNetModelTester(self )
__a : List[str] = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def _lowerCamelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self ):
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(_UpperCAmelCase )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Dict = [*signature.parameters.keys()]
__a : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = model_class(_UpperCAmelCase )
__a : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__a : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : Tuple = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__a : str = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__a : Optional[Any] = layer_type
__a : str = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Dict = TFResNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __A ( ) -> List[str]:
__a : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ):
__a : Optional[Any] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__a : Optional[int] = self.default_image_processor
__a : Optional[int] = prepare_img()
__a : int = image_processor(images=_UpperCAmelCase , return_tensors='''tf''' )
# forward pass
__a : List[str] = model(**_UpperCAmelCase )
# verify the logits
__a : int = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__a : List[Any] = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _UpperCAmelCase , atol=1e-4 ) )
| 52 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 52 | 1 |
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR gate: returns 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''llama'''
__lowerCAmelCase = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=32000 , _UpperCAmelCase=4096 , _UpperCAmelCase=11008 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase="silu" , _UpperCAmelCase=2048 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , **_UpperCAmelCase , ):
__a : Dict = vocab_size
__a : Union[str, Any] = max_position_embeddings
__a : str = hidden_size
__a : List[str] = intermediate_size
__a : Any = num_hidden_layers
__a : int = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__a : Union[str, Any] = num_attention_heads
__a : Optional[int] = num_key_value_heads
__a : Dict = hidden_act
__a : Union[str, Any] = initializer_range
__a : int = rms_norm_eps
__a : Optional[int] = pretraining_tp
__a : Optional[Any] = use_cache
__a : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase , )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
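# A hedged illustration of the contract enforced above (hypothetical values;
# LlamaConfig is assumed to be the public name this class is exported under):
#
#     config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes
#     config = LlamaConfig(rope_scaling={"type": "static"})                 # raises ValueError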
| 52 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
A = logging.get_logger(__name__)
A = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''longformer'''
def __init__( self , _UpperCAmelCase = 512 , _UpperCAmelCase = 2 , _UpperCAmelCase = 1 , _UpperCAmelCase = 0 , _UpperCAmelCase = 2 , _UpperCAmelCase = 30522 , _UpperCAmelCase = 768 , _UpperCAmelCase = 12 , _UpperCAmelCase = 12 , _UpperCAmelCase = 3072 , _UpperCAmelCase = "gelu" , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 512 , _UpperCAmelCase = 2 , _UpperCAmelCase = 0.0_2 , _UpperCAmelCase = 1e-1_2 , _UpperCAmelCase = False , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__a : Union[str, Any] = attention_window
__a : List[Any] = sep_token_id
__a : Optional[Any] = bos_token_id
__a : List[str] = eos_token_id
__a : List[Any] = vocab_size
__a : Optional[Any] = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : Optional[int] = num_attention_heads
__a : Optional[Any] = hidden_act
__a : Dict = intermediate_size
__a : Dict = hidden_dropout_prob
__a : Optional[int] = attention_probs_dropout_prob
__a : Tuple = max_position_embeddings
__a : List[Any] = type_vocab_size
__a : int = initializer_range
__a : Optional[int] = layer_norm_eps
__a : str = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
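# The masking pattern built above, in isolation (hypothetical 1x8 batch):
#
#     import torch
#     ids = torch.ones(1, 8, dtype=torch.long)
#     gam = torch.zeros_like(ids)
#     gam[:, ::2] = 1    # -> [[1, 0, 1, 0, 1, 0, 1, 0]]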
| 52 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__a : int = parent
__a : str = batch_size
__a : List[Any] = num_channels
__a : Union[str, Any] = image_size
__a : List[Any] = min_resolution
__a : str = max_resolution
__a : List[str] = do_resize
__a : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 20}
__a : str = do_thumbnail
__a : str = do_align_axis
__a : Dict = do_pad
__a : Union[str, Any] = do_normalize
__a : List[str] = image_mean
__a : Optional[int] = image_std
def _lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : Tuple = DonutImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__a : int = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def _lowerCamelCase ( self ):
pass
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : int = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : str = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__a : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : List[str] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 52 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''bit'''
__lowerCAmelCase = ['''preactivation''', '''bottleneck''']
__lowerCAmelCase = ['''SAME''', '''VALID''']
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=64 , _UpperCAmelCase=[256, 512, 1024, 2048] , _UpperCAmelCase=[3, 4, 6, 3] , _UpperCAmelCase="preactivation" , _UpperCAmelCase="relu" , _UpperCAmelCase=None , _UpperCAmelCase=32 , _UpperCAmelCase=0.0 , _UpperCAmelCase=False , _UpperCAmelCase=32 , _UpperCAmelCase=1 , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
__a : str = global_padding.upper()
else:
raise ValueError(f"""Padding strategy {global_padding} not supported""" )
__a : List[Any] = num_channels
__a : str = embedding_size
__a : Optional[Any] = hidden_sizes
__a : str = depths
__a : Optional[int] = layer_type
__a : str = hidden_act
__a : int = global_padding
__a : List[Any] = num_groups
__a : List[str] = drop_path_rate
__a : Tuple = embedding_dynamic_padding
__a : Tuple = output_stride
__a : Union[str, Any] = width_factor
__a : Optional[Any] = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(_UpperCAmelCase ) + 1 )]
__a , __a : Optional[Any] = get_aligned_output_features_output_indices(
out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )
| 52 |
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from non-adjacent elements of ``nums``.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
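# Worked example: for [1, 2, 3, 4, 5] the best non-adjacent picks are
# 1 + 3 + 5 = 9, which beats 2 + 4 = 6.
assert maximum_non_adjacent_sum([1, 2, 3, 4, 5]) == 9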
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    r"""Build the example tree::

            1
           / \
          2   3
         / \
        4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Alternate left-to-right and right-to-left traversal, level by level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 52 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A = '''▁'''
A = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = BigBirdTokenizer
__lowerCAmelCase = BigBirdTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
def _lowerCamelCase ( self ):
super().setUp()
__a : Dict = self.tokenizer_class(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self ):
__a : List[str] = '''<s>'''
__a : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_UpperCAmelCase ) , 1004 )
def _lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__a : Dict = self.get_tokenizer()
__a : Any = self.get_rust_tokenizer()
__a : int = '''I was born in 92000, and this is falsé.'''
__a : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
__a : List[str] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__a : Any = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Tuple = self.get_rust_tokenizer()
__a : Tuple = tokenizer.encode(_UpperCAmelCase )
__a : List[Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = BigBirdTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
__a : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
__a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a : Optional[Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def _lowerCamelCase ( self ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def _lowerCamelCase ( self ):
__a : str = '''Hello World!'''
__a : str = [65, 18536, 2260, 101, 66]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def _lowerCamelCase ( self ):
__a : Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
__a : Optional[Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@require_torch
@slow
def _lowerCamelCase ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__a : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__a : List[str] = ''' '''.join(_UpperCAmelCase )
__a : Tuple = self.big_tokenizer.encode_plus(_UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Any = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Optional[Any] = BigBirdConfig(attention_type='''original_full''' )
__a : Tuple = BigBirdModel(_UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCAmelCase )
model(**_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
__a : Union[str, Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
__a : List[Any] = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def _lowerCamelCase ( self ):
# fmt: off
__a : Optional[Any] = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 52 | 1 |
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
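# Quick check against a small case: 13195 = 5 * 7 * 13 * 29, so the largest
# prime factor is 29.
assert solution(13195) == 29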
if __name__ == "__main__":
print(F'{solution() = }')
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''convnextv2'''
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=224 , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : List[str] = num_channels
__a : str = patch_size
__a : Dict = num_stages
__a : List[str] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
__a : List[str] = [3, 3, 9, 3] if depths is None else depths
__a : List[Any] = hidden_act
__a : Any = initializer_range
__a : Optional[int] = layer_norm_eps
__a : List[Any] = drop_path_rate
__a : Any = image_size
__a : str = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__a , __a : Optional[int] = get_aligned_output_features_output_indices(
out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )
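# Hedged usage sketch; ConvNextV2Config is assumed to be the name this class
# is exported under in transformers (based on the `convnextv2` model_type):
#
#     from transformers import ConvNextV2Config
#     tiny = ConvNextV2Config(depths=[2, 2, 6, 2], hidden_sizes=[40, 80, 160, 320])
#     tiny.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']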
| 52 | 1 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
A = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Any = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
__a : Any = self.transformer_dir
shutil.copy(
os.path.join(_UpperCAmelCase , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def _lowerCamelCase ( self ):
__a : Optional[int] = '''src/transformers'''
shutil.rmtree(self.transformer_dir )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
__a : Union[str, Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
__a : Optional[int] = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
__a : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
__a : Dict = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase )
__a : int = os.path.join(self.transformer_dir , '''new_code.py''' )
with open(_UpperCAmelCase , '''w''' , newline='''\n''' ) as f:
f.write(_UpperCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_UpperCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_UpperCAmelCase )
with open(_UpperCAmelCase , '''r''' ) as f:
self.assertTrue(f.read() , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , _UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , _UpperCAmelCase ) , )
# Copy consistency with a really long name
__a : Tuple = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
f"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , f"""{long_class_name}LMPredictionHead""" , re.sub('''Bert''' , _UpperCAmelCase , _UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , _UpperCAmelCase , overwrite_result=re.sub('''Bert''' , '''TestModel''' , _UpperCAmelCase ) , )
def _lowerCamelCase ( self ):
__a : int = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
__a : Optional[int] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
''' Luong, Quoc V. Le, Christopher D. Manning.'''
)
__a : Optional[int] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
__a : List[str] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
''' method has been applied to compress GPT2 into'''
''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
''' Multilingual BERT into'''
''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
''' Christopher D. Manning 发布。\n'''
)
__a , __a : Optional[Any] = check_copies.convert_to_localized_md(
_UpperCAmelCase , _UpperCAmelCase , localized_readme['''format_model_list'''] )
self.assertFalse(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
__a , __a : str = check_copies.convert_to_localized_md(
_UpperCAmelCase , _UpperCAmelCase , localized_readme['''format_model_list'''] )
# Check that the converted list contains the same number of models as the original README.md.
self.assertTrue(_UpperCAmelCase )
__a : Union[str, Any] = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
)
__a : List[Any] = (
'''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
__a : Dict = (
'''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
)
__a , __a : Tuple = check_copies.convert_to_localized_md(
_UpperCAmelCase , _UpperCAmelCase , localized_readme['''format_model_list'''] )
# Check if the model link is synchronized.
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
| 52 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = (DDPMScheduler,)
def _lowerCamelCase ( self , **_UpperCAmelCase ):
__a : int = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_UpperCAmelCase )
return config
def _lowerCamelCase ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def _lowerCamelCase ( self ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def _lowerCamelCase ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Dict = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def _lowerCamelCase ( self ):
__a : int = self.scheduler_classes[0]
__a : int = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**_UpperCAmelCase )
__a : int = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[Any] = self.dummy_sample_deter
__a : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Optional[int] = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : List[Any] = pred_prev_sample
__a : int = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = self.scheduler_classes[0]
__a : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a : int = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[str] = self.dummy_sample_deter
__a : str = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Dict = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : Optional[int] = pred_prev_sample
__a : Optional[int] = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Any = self.get_scheduler_config()
__a : str = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
__a : List[Any] = scheduler.timesteps
for i, timestep in enumerate(_UpperCAmelCase ):
if i == len(_UpperCAmelCase ) - 1:
__a : Union[str, Any] = -1
else:
__a : str = timesteps[i + 1]
__a : Dict = scheduler.previous_timestep(_UpperCAmelCase )
__a : str = prev_t.item()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Optional[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(_UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
__a : Optional[int] = len(_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : List[str] = scheduler_class(**_UpperCAmelCase )
__a : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_UpperCAmelCase , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
| 52 | 1 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def __A ( a_ :Dataset , a_ :Dict[str, str]) -> int:
__a : str = args.log_outputs
__a : Union[str, Any] = '''_'''.join(args.dataset.split('''/''') + [args.config, args.split])
# load metric
__a : List[Any] = load_metric('''wer''')
__a : int = load_metric('''cer''')
# compute metrics
__a : Any = wer.compute(references=result['''target'''] , predictions=result['''prediction'''])
__a : List[str] = cer.compute(references=result['''target'''] , predictions=result['''prediction'''])
# print & log results
__a : Any = F"""WER: {wer_result}\nCER: {cer_result}"""
print(a_)
with open(F"""{dataset_id}_eval_results.txt""" , '''w''') as f:
f.write(a_)
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
__a : int = F"""log_{dataset_id}_predictions.txt"""
__a : List[str] = F"""log_{dataset_id}_targets.txt"""
with open(a_ , '''w''') as p, open(a_ , '''w''') as t:
# mapping function to write output
def write_to_file(a_ :Union[str, Any] , a_ :List[Any]):
p.write(F"""{i}""" + '''\n''')
p.write(batch['''prediction'''] + '''\n''')
t.write(F"""{i}""" + '''\n''')
t.write(batch['''target'''] + '''\n''')
result.map(a_ , with_indices=a_)
def __A ( a_ :str) -> str:
__a : int = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
__a : Optional[int] = re.sub(a_ , '''''' , text.lower())
# In addition, we can normalize the target text, e.g. removing newline characters, etc.
# note that order is important here!
__a : Dict = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
__a : Optional[int] = ''' '''.join(text.split(a_))
return text
def __A ( a_ :int) -> List[str]:
# load dataset
__a : List[str] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=a_)
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
__a : Union[str, Any] = AutoFeatureExtractor.from_pretrained(args.model_id)
__a : str = feature_extractor.sampling_rate
# resample audio
__a : Tuple = dataset.cast_column('''audio''' , Audio(sampling_rate=a_))
# load eval pipeline
if args.device is None:
__a : Dict = 0 if torch.cuda.is_available() else -1
__a : Optional[int] = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device)
# map function to decode audio
def map_to_pred(a_ :List[Any]):
__a : Optional[Any] = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s)
__a : List[str] = prediction['''text''']
__a : List[str] = normalize_text(batch['''sentence'''])
return batch
# run inference on all examples
__a : List[str] = dataset.map(a_ , remove_columns=dataset.column_names)
# compute and log_results
# do not change function below
log_results(a_ , a_)
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
A = parser.parse_args()
main(args)
| 52 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
A = random.Random()
def __A ( a_ :Tuple , a_ :Dict=1.0 , a_ :str=None , a_ :List[Any]=None) -> Dict:
if rng is None:
__a : Any = global_rng
__a : Tuple = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
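# Hedged sketch (not part of the original file): the helper above fills a
# (shape[0], shape[1]) nested list with rng.random() * scale values. A
# self-contained re-illustration with a fixed seed:
import random as _random
_rng = _random.Random(0)
_values = [[_rng.random() * 1.0 for _ in range(3)] for _ in range(2)]
assert len(_values) == 2 and len(_values[0]) == 3
assert all(0.0 <= v < 1.0 for row in _values for v in row)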
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=400 , _UpperCAmelCase=2000 , _UpperCAmelCase=2048 , _UpperCAmelCase=128 , _UpperCAmelCase=1 , _UpperCAmelCase=512 , _UpperCAmelCase=30 , _UpperCAmelCase=44100 , ):
__a : Any = parent
__a : Tuple = batch_size
__a : Tuple = min_seq_length
__a : List[str] = max_seq_length
__a : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a : Tuple = spectrogram_length
__a : int = feature_size
__a : int = num_audio_channels
__a : Tuple = hop_length
__a : List[Any] = chunk_length
__a : Any = sampling_rate
def _lowerCamelCase ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ):
def _flatten(_UpperCAmelCase ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
__a : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a : Optional[Any] = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = TvltFeatureExtractor
def _lowerCamelCase ( self ):
__a : Optional[Any] = TvltFeatureExtractionTester(self )
def _lowerCamelCase ( self ):
__a : int = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''hop_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : List[str] = feat_extract_first.save_pretrained(_UpperCAmelCase )[0]
check_json_file_has_correct_format(_UpperCAmelCase )
__a : Union[str, Any] = self.feature_extraction_class.from_pretrained(_UpperCAmelCase )
__a : Tuple = feat_extract_first.to_dict()
__a : List[Any] = feat_extract_second.to_dict()
__a : int = dict_first.pop('''mel_filters''' )
__a : List[Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : int = os.path.join(_UpperCAmelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(_UpperCAmelCase )
__a : Optional[Any] = self.feature_extraction_class.from_json_file(_UpperCAmelCase )
__a : Optional[Any] = feat_extract_first.to_dict()
__a : Any = feat_extract_second.to_dict()
__a : Optional[Any] = dict_first.pop('''mel_filters''' )
__a : Dict = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
# Initialize feature_extractor
__a : str = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__a : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__a : int = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__a : List[Any] = feature_extractor(
_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 , mask_audio=_UpperCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__a : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a : Any = np.asarray(_UpperCAmelCase )
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : int = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__a : int = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self ):
__a : List[str] = self._load_datasamples(1 )
__a : Tuple = TvltFeatureExtractor()
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__a : Dict = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _UpperCAmelCase , atol=1e-4 ) )
| 52 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : int = (3, 32, 128)
__a : List[str] = tempfile.mkdtemp()
# fmt: off
__a : List[str] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
__a : Optional[int] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' )
__a : Any = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
__a : int = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self , **_UpperCAmelCase ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _lowerCamelCase ( self , **_UpperCAmelCase ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _lowerCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ):
__a : List[str] = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
__a : Optional[Any] = Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) )
return image_input
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.get_tokenizer()
__a : List[str] = self.get_image_processor()
__a : Dict = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__a : List[Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.get_tokenizer()
__a : List[str] = self.get_image_processor()
__a : Optional[int] = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__a : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__a : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__a : List[str] = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = self.get_image_processor()
__a : Optional[Any] = self.get_tokenizer()
__a : Any = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : Optional[Any] = self.prepare_image_inputs()
__a : int = image_processor(_UpperCAmelCase , return_tensors='''np''' )
__a : str = processor(images=_UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCamelCase ( self ):
__a : Any = self.get_image_processor()
__a : Dict = self.get_tokenizer()
__a : str = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : List[Any] = '''test'''
__a : Optional[Any] = processor(text=_UpperCAmelCase )
__a : Any = tokenizer(_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCamelCase ( self ):
__a : List[Any] = self.get_image_processor()
__a : Any = self.get_tokenizer()
__a : int = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : str = '''test'''
__a : Union[str, Any] = self.prepare_image_inputs()
__a : Tuple = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def _lowerCamelCase ( self ):
__a : Optional[int] = self.get_image_processor()
__a : Optional[int] = self.get_tokenizer()
__a : Optional[int] = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__a : int = processor.char_decode(_UpperCAmelCase )
__a : str = tokenizer.batch_decode(_UpperCAmelCase )
__a : List[Any] = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Any = self.get_image_processor()
__a : str = self.get_tokenizer()
__a : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : Tuple = None
__a : Dict = self.prepare_image_inputs()
__a : Optional[Any] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def _lowerCamelCase ( self ):
__a : str = self.get_image_processor()
__a : str = self.get_tokenizer()
__a : Any = MgpstrProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : Optional[Any] = torch.randn(1 , 27 , 38 )
__a : Optional[Any] = torch.randn(1 , 27 , 50257 )
__a : List[str] = torch.randn(1 , 27 , 30522 )
__a : Dict = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 52 |
"""simple docstring"""
from __future__ import annotations
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a , __a : List[Any] = text, pattern
__a , __a : Tuple = len(_UpperCAmelCase ), len(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def _lowerCamelCase ( self , _UpperCAmelCase ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _lowerCamelCase ( self ):
# searches pattern in text and returns index positions
__a : Dict = []
for i in range(self.textLen - self.patLen + 1 ):
__a : List[str] = self.mismatch_in_text(_UpperCAmelCase )
if mismatch_index == -1:
positions.append(_UpperCAmelCase )
else:
__a : Tuple = self.match_in_pattern(self.text[mismatch_index] )
__a : Optional[int] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
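# Hedged cross-check (not part of the original file): with text "ABAABA" and
# pattern "AB", the bad-character search above reports match positions [0, 3].
# A self-contained naive scan confirms the expected result:
_text, _pat = "ABAABA", "AB"
_naive = [i for i in range(len(_text) - len(_pat) + 1) if _text[i : i + len(_pat)] == _pat]
assert _naive == [0, 3]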
A = '''ABAABA'''
A = '''AB'''
A = BoyerMooreSearch(text, pattern)
A = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ :list[int] , a_ :int) -> list[int]:
__a : int = 0
__a : Union[str, Any] = len(a_) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
__a : Optional[Any] = i + 1
else:
__a : Optional[int] = j - 1
return []
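# Hedged note (not part of the original file): the two-pointer walk above
# assumes `nums` is sorted ascending. For unsorted input, a dict of previously
# seen values gives the same O(n) answer; names below are illustrative only.
def _two_sum_unsorted(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}
    for j, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], j]
        seen[value] = j
    return []

assert _two_sum_unsorted([11, 2, 15, 7], 9) == [1, 3]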
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{two_pointer([2, 7, 11, 15], 9) = }')
| 52 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
A = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
A = F'https://www.google.com/search?q={query}&num=100'
A = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
A = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
A = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 52 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self ):
torch.manual_seed(0 )
__a : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def _lowerCamelCase ( self ):
__a : Tuple = self.dummy_uncond_unet
__a : Union[str, Any] = ScoreSdeVeScheduler()
__a : Dict = ScoreSdeVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
sde_ve.to(_UpperCAmelCase )
sde_ve.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Union[str, Any] = torch.manual_seed(0 )
__a : Union[str, Any] = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_UpperCAmelCase ).images
__a : Optional[int] = torch.manual_seed(0 )
__a : Tuple = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=_UpperCAmelCase , return_dict=_UpperCAmelCase )[
0
]
__a : int = image[0, -3:, -3:, -1]
__a : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__a : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Union[str, Any] = '''google/ncsnpp-church-256'''
__a : List[Any] = UNetaDModel.from_pretrained(_UpperCAmelCase )
__a : Optional[Any] = ScoreSdeVeScheduler.from_pretrained(_UpperCAmelCase )
__a : Any = ScoreSdeVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
sde_ve.to(_UpperCAmelCase )
sde_ve.set_progress_bar_config(disable=_UpperCAmelCase )
__a : List[str] = torch.manual_seed(0 )
__a : Dict = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=_UpperCAmelCase ).images
__a : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__a : List[Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 52 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = False
__lowerCAmelCase = 3.0
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_UpperCAmelCase ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.2_5 ).to_kwargs() , {'''a''': 2, '''c''': 2.2_5} )
@require_cuda
def _lowerCamelCase ( self ):
# Check that GradScalerKwargs values are passed through to the scaler created by the Accelerator.
__a : List[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
__a : int = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__a : Optional[Any] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _UpperCAmelCase )
@require_multi_gpu
def _lowerCamelCase ( self ):
__a : Dict = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
A = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
A = Accelerator(kwargs_handlers=[ddp_scaler])
A = torch.nn.Linear(100, 200)
A = accelerator.prepare(model)
# Check the values changed in kwargs
A = ''''''
A = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 52 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def __A ( a_ :str) -> YolosConfig:
__a : Tuple = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
__a : Tuple = 1_92
__a : List[Any] = 7_68
__a : List[Any] = 12
__a : Tuple = 3
__a : Any = [8_00, 13_33]
__a : List[str] = False
elif yolos_name == "yolos_s_dWr":
__a : Tuple = 3_30
__a : Union[str, Any] = 14
__a : Dict = 6
__a : Dict = 13_20
elif "yolos_s" in yolos_name:
__a : Optional[Any] = 3_84
__a : List[str] = 15_36
__a : List[Any] = 12
__a : Optional[int] = 6
elif "yolos_b" in yolos_name:
__a : Union[str, Any] = [8_00, 13_44]
__a : Optional[Any] = 91
__a : List[str] = '''huggingface/label-files'''
__a : str = '''coco-detection-id2label.json'''
__a : List[str] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
__a : Tuple = {int(a_): v for k, v in idalabel.items()}
__a : Dict = idalabel
__a : Dict = {v: k for k, v in idalabel.items()}
return config
def __A ( a_ :dict , a_ :YolosConfig , a_ :bool = False) -> Optional[Any]:
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__a : Union[str, Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""")
__a : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""")
# next, add query, keys and values (in that order) to the state dict
__a : str = in_proj_weight[: config.hidden_size, :]
__a : int = in_proj_bias[: config.hidden_size]
__a : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__a : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__a : str = in_proj_weight[-config.hidden_size :, :]
__a : List[str] = in_proj_bias[-config.hidden_size :]
def __A ( a_ :str) -> str:
if "backbone" in name:
__a : Union[str, Any] = name.replace('''backbone''' , '''vit''')
if "cls_token" in name:
__a : List[str] = name.replace('''cls_token''' , '''embeddings.cls_token''')
if "det_token" in name:
__a : Any = name.replace('''det_token''' , '''embeddings.detection_tokens''')
if "mid_pos_embed" in name:
__a : Any = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''')
if "pos_embed" in name:
__a : Optional[int] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''')
if "patch_embed.proj" in name:
__a : Union[str, Any] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''')
if "blocks" in name:
__a : Optional[Any] = name.replace('''blocks''' , '''encoder.layer''')
if "attn.proj" in name:
__a : Any = name.replace('''attn.proj''' , '''attention.output.dense''')
if "attn" in name:
__a : List[Any] = name.replace('''attn''' , '''attention.self''')
if "norm1" in name:
__a : List[Any] = name.replace('''norm1''' , '''layernorm_before''')
if "norm2" in name:
__a : List[Any] = name.replace('''norm2''' , '''layernorm_after''')
if "mlp.fc1" in name:
__a : Tuple = name.replace('''mlp.fc1''' , '''intermediate.dense''')
if "mlp.fc2" in name:
__a : Optional[Any] = name.replace('''mlp.fc2''' , '''output.dense''')
if "class_embed" in name:
__a : int = name.replace('''class_embed''' , '''class_labels_classifier''')
if "bbox_embed" in name:
__a : str = name.replace('''bbox_embed''' , '''bbox_predictor''')
if "vit.norm" in name:
__a : Optional[Any] = name.replace('''vit.norm''' , '''vit.layernorm''')
return name
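# Hedged sketch (illustrative key, not part of the original file): the chain of
# replacements above maps a timm-style attention projection key onto the HF
# layout, e.g.:
_key = "blocks.0.attn.proj.weight"
_key = _key.replace("blocks", "encoder.layer").replace("attn.proj", "attention.output.dense")
assert _key == "encoder.layer.0.attention.output.dense.weight"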
def __A ( a_ :dict , a_ :YolosForObjectDetection) -> dict:
for key in orig_state_dict.copy().keys():
__a : Dict = orig_state_dict.pop(a_)
if "qkv" in key:
__a : List[Any] = key.split('''.''')
__a : int = int(key_split[2])
__a : Any = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
__a : Union[str, Any] = val[:dim, :]
__a : List[Any] = val[
dim : dim * 2, :
]
__a : str = val[-dim:, :]
else:
__a : Union[str, Any] = val[:dim]
__a : Any = val[dim : dim * 2]
__a : Union[str, Any] = val[-dim:]
else:
__a : int = val
return orig_state_dict
def __A ( ) -> torch.Tensor:
__a : Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__a : Optional[int] = Image.open(requests.get(a_ , stream=a_).raw)
return im
@torch.no_grad()
def __A ( a_ :str , a_ :str , a_ :str , a_ :bool = False) -> Union[str, Any]:
__a : List[Any] = get_yolos_config(a_)
# load original state_dict
__a : Union[str, Any] = torch.load(a_ , map_location='''cpu''')['''model''']
# load 🤗 model
__a : str = YolosForObjectDetection(a_)
model.eval()
__a : Optional[Any] = convert_state_dict(a_ , a_)
model.load_state_dict(a_)
# Check outputs on an image, prepared by YolosImageProcessor
__a : Dict = 8_00 if yolos_name != '''yolos_ti''' else 5_12
__a : Optional[Any] = YolosImageProcessor(format='''coco_detection''' , size=a_)
__a : str = image_processor(images=prepare_img() , return_tensors='''pt''')
__a : Optional[Any] = model(**a_)
__a , __a : Optional[int] = outputs.logits, outputs.pred_boxes
__a , __a : Dict = None, None
if yolos_name == "yolos_ti":
__a : Dict = torch.tensor(
[[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]])
__a : str = torch.tensor(
[[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]])
elif yolos_name == "yolos_s_200_pre":
__a : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]])
__a : str = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]])
elif yolos_name == "yolos_s_300_pre":
__a : Tuple = torch.tensor(
[[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]])
__a : Union[str, Any] = torch.tensor(
[[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]])
elif yolos_name == "yolos_s_dWr":
__a : str = torch.tensor(
[[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]])
__a : Union[str, Any] = torch.tensor(
[[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]])
elif yolos_name == "yolos_base":
__a : Any = torch.tensor(
[[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]])
__a : int = torch.tensor(
[[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]])
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""")
assert torch.allclose(logits[0, :3, :3] , a_ , atol=1e-4)
assert torch.allclose(pred_boxes[0, :3, :3] , a_ , atol=1e-4)
Path(a_).mkdir(exist_ok=a_)
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""")
model.save_pretrained(a_)
print(F"""Saving image processor to {pytorch_dump_folder_path}""")
image_processor.save_pretrained(a_)
if push_to_hub:
__a : Optional[Any] = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''')
__a : Dict = model_mapping[yolos_name]
image_processor.push_to_hub(a_ , organization='''hustvl''')
model.push_to_hub(a_ , organization='''hustvl''')
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 52 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 | 1 |
"""simple docstring"""
from collections import defaultdict
def __A ( a_ :str , a_ :str) -> bool:
__a : List[Any] = first_str.lower().strip()
__a : str = second_str.lower().strip()
# Remove whitespace
__a : Dict = first_str.replace(''' ''' , '''''')
__a : Optional[Any] = second_str.replace(''' ''' , '''''')
# Strings of different lengths are not anagrams
if len(a_) != len(a_):
return False
# Default values for count should be 0
__a : defaultdict[str, int] = defaultdict(a_)
# For each character in input strings,
# increment count in the corresponding
for i in range(len(a_)):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values())
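# Hedged sketch (not part of the original file): the defaultdict tally above is
# equivalent to comparing two collections.Counter objects built from the
# whitespace-stripped, lowercased strings.
from collections import Counter
assert Counter("silent") == Counter("listen")
assert Counter("rat") != Counter("car")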
if __name__ == "__main__":
from doctest import testmod
testmod()
A = input('''Enter the first string ''').strip()
A = input('''Enter the second string ''').strip()
A = check_anagrams(input_a, input_b)
print(F'{input_a} and {input_b} are {"" if status else "not "}anagrams.')
| 52 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
A = get_logger(__name__)
A = Path(__file__).parent / '''model_card_template.md'''
A = uuida().hex
A = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __A ( a_ :Union[Dict, str, None] = None) -> str:
__a : Union[str, Any] = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''').upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(a_ , a_):
ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items())
elif isinstance(a_ , a_):
ua += "; " + user_agent
return ua
def __A ( a_ :str , a_ :Optional[str] = None , a_ :Optional[str] = None) -> Optional[int]:
if token is None:
__a : Any = HfFolder.get_token()
if organization is None:
__a : List[Any] = whoami(a_)['''name''']
return F"""{username}/{model_id}"""
else:
return F"""{organization}/{model_id}"""
def __A ( a_ :Union[str, Any] , a_ :List[str]) -> Optional[Any]:
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''')
if hasattr(a_ , '''local_rank''') and args.local_rank not in [-1, 0]:
return
__a : int = args.hub_token if hasattr(a_ , '''hub_token''') else None
__a : Any = get_full_repo_name(a_ , token=a_)
__a : Tuple = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) ,
template_path=a_ ,
model_name=a_ ,
repo_name=a_ ,
dataset_name=args.dataset_name if hasattr(a_ , '''dataset_name''') else None ,
learning_rate=args.learning_rate ,
train_batch_size=args.train_batch_size ,
eval_batch_size=args.eval_batch_size ,
gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(a_ , '''gradient_accumulation_steps''') else None
) ,
adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta1''') else None ,
adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta2''') else None ,
adam_weight_decay=args.adam_weight_decay if hasattr(a_ , '''adam_weight_decay''') else None ,
adam_epsilon=args.adam_epsilon if hasattr(a_ , '''adam_epsilon''') else None ,
lr_scheduler=args.lr_scheduler if hasattr(a_ , '''lr_scheduler''') else None ,
lr_warmup_steps=args.lr_warmup_steps if hasattr(a_ , '''lr_warmup_steps''') else None ,
ema_inv_gamma=args.ema_inv_gamma if hasattr(a_ , '''ema_inv_gamma''') else None ,
ema_power=args.ema_power if hasattr(a_ , '''ema_power''') else None ,
ema_max_decay=args.ema_max_decay if hasattr(a_ , '''ema_max_decay''') else None ,
mixed_precision=args.mixed_precision , )
__a : List[Any] = os.path.join(args.output_dir , '''README.md''')
model_card.save(a_)
def __A ( a_ :Optional[str] , a_ :Optional[str] = None) -> Union[str, Any]:
if resolved_file is None or commit_hash is not None:
return commit_hash
__a : Any = str(Path(a_).as_posix())
__a : Optional[int] = re.search(R'''snapshots/([^/]+)/''' , a_)
if search is None:
return None
__a : Dict = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(a_) else None
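# Hedged sketch (illustrative path, not from the original file): the helper
# above pulls a 40-hex commit hash out of a hub snapshot path; anything else
# yields None. A self-contained re-illustration of the regex step:
_fake_hash = "a" * 40
_resolved = f"/cache/models--org--name/snapshots/{_fake_hash}/model.bin"
_search = re.search(r"snapshots/([^/]+)/", _resolved)
assert _search is not None and _search.groups()[0] == _fake_hash
assert re.search(r"snapshots/([^/]+)/", "/tmp/plain/model.bin") is None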
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
A = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
A = os.path.join(hf_cache_home, '''diffusers''')
def __A ( a_ :Optional[str] = None , a_ :Optional[str] = None) -> None:
if new_cache_dir is None:
__a : Dict = DIFFUSERS_CACHE
if old_cache_dir is None:
__a : List[Any] = old_diffusers_cache
__a : Union[str, Any] = Path(a_).expanduser()
__a : Dict = Path(a_).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*'''):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
__a : List[Any] = new_cache_dir / old_blob_path.relative_to(a_)
new_blob_path.parent.mkdir(parents=a_ , exist_ok=a_)
os.replace(a_ , a_)
try:
os.symlink(a_ , a_)
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
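# Illustrative sketch (not from the original file) of the move-then-symlink step
# above, exercised on throwaway temporary directories so nothing touches a real
# cache. Symlink creation may require extra privileges on Windows.
import os
import tempfile
from pathlib import Path
_example_old = Path(tempfile.mkdtemp())
_example_new = Path(tempfile.mkdtemp())
_example_blob = _example_old / '''blobs''' / '''abc'''
_example_blob.parent.mkdir(parents=True)
_example_blob.write_text('''weights''')
_example_target = _example_new / _example_blob.relative_to(_example_old)
_example_target.parent.mkdir(parents=True, exist_ok=True)
os.replace(_example_blob, _example_target)  # move the blob to the new cache
os.symlink(_example_target, _example_blob)  # leave a symlink behind for old readers
print(_example_blob.read_text())  # -> weights, served through the symlink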
A = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
A = 0
else:
with open(cache_version_file) as f:
try:
A = int(f.read())
except ValueError:
A = 0
if cache_version < 1:
A = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
A = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def __A ( a_ :str , a_ :Optional[str] = None) -> str:
if variant is not None:
__a : Dict = weights_name.split('''.''')
__a : List[Any] = splits[:-1] + [variant] + splits[-1:]
__a : Tuple = '''.'''.join(a_)
return weights_name
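# Illustrative sketch (not from the original file): the filename splice performed
# by the variant helper above. The weights filename and variant tag are examples.
_example_splits = '''diffusion_pytorch_model.bin'''.split('''.''')
_example_splits = _example_splits[:-1] + ['''fp16'''] + _example_splits[-1:]
print('''.'''.join(_example_splits))  # -> diffusion_pytorch_model.fp16.bin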
def __A ( a_ :List[Any] , *,
a_ :Union[str, Any] , a_ :Dict , a_ :Union[str, Any] , a_ :Optional[int] , a_ :str , a_ :Any , a_ :str , a_ :Optional[int] , a_ :str , a_ :Tuple , a_ :List[str]=None , ) -> Dict:
__a : int = str(a_)
if os.path.isfile(a_):
return pretrained_model_name_or_path
elif os.path.isdir(a_):
if os.path.isfile(os.path.join(a_ , a_)):
# Load from a PyTorch checkpoint
__a : Union[str, Any] = os.path.join(a_ , a_)
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(a_ , a_ , a_)):
__a : Optional[Any] = os.path.join(a_ , a_ , a_)
return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""")
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(a_).base_version) >= version.parse('''0.20.0''')
):
try:
__a : Any = hf_hub_download(
a_ , filename=_add_variant(a_ , a_) , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , user_agent=a_ , subfolder=a_ , revision=revision or commit_hash , )
warnings.warn(
F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , a_ , )
return model_file
except: # noqa: E722
warnings.warn(
F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(a_ , a_)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(a_ , a_)}' so that the correct variant file can be added.""" , a_ , )
try:
# 2. Load model file as usual
__a : Optional[Any] = hf_hub_download(
a_ , filename=a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , user_agent=a_ , subfolder=a_ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''')
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""")
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""")
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""")
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''')
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""")
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''roberta'''
def __init__( self , _UpperCAmelCase=50265 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__a : Dict = vocab_size
__a : Optional[int] = hidden_size
__a : Optional[int] = num_hidden_layers
__a : Optional[Any] = num_attention_heads
__a : Tuple = hidden_act
__a : int = intermediate_size
__a : List[Any] = hidden_dropout_prob
__a : Union[str, Any] = attention_probs_dropout_prob
__a : Dict = max_position_embeddings
__a : Optional[int] = type_vocab_size
__a : Union[str, Any] = initializer_range
__a : int = layer_norm_eps
__a : List[Any] = position_embedding_type
__a : Optional[int] = use_cache
__a : Optional[Any] = classifier_dropout
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self ):
if self.task == "multiple-choice":
__a : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__a : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
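# Illustrative sketch (not from the original file): the dynamic-axes mapping the
# ONNX config above emits for each task, reproduced with a plain OrderedDict so
# it can run without transformers.
from collections import OrderedDict
_example_task = '''multiple-choice'''
if _example_task == "multiple-choice":
    _example_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
    _example_axis = {0: '''batch''', 1: '''sequence'''}
print(OrderedDict([('''input_ids''', _example_axis), ('''attention_mask''', _example_axis)]))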
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_text_model'''
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = vocab_size
__a : Optional[int] = hidden_size
__a : Dict = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : Optional[int] = hidden_act
__a : List[Any] = intermediate_size
__a : List[Any] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : List[str] = type_vocab_size
__a : Tuple = initializer_range
__a : Dict = layer_norm_eps
__a : Any = position_embedding_type
__a : Dict = use_cache
__a : Dict = pad_token_id
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : List[str] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Dict = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_vision_model'''
def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 600 , _UpperCAmelCase = 2.0 , _UpperCAmelCase = 3.1 , _UpperCAmelCase = 8 , _UpperCAmelCase = [3, 3, 5, 3, 5, 5, 3] , _UpperCAmelCase = [32, 16, 24, 40, 80, 112, 192] , _UpperCAmelCase = [16, 24, 40, 80, 112, 192, 320] , _UpperCAmelCase = [] , _UpperCAmelCase = [1, 2, 2, 2, 1, 2, 1] , _UpperCAmelCase = [1, 2, 2, 3, 3, 4, 1] , _UpperCAmelCase = [1, 6, 6, 6, 6, 6, 6] , _UpperCAmelCase = 0.2_5 , _UpperCAmelCase = "swish" , _UpperCAmelCase = 2560 , _UpperCAmelCase = "mean" , _UpperCAmelCase = 0.0_2 , _UpperCAmelCase = 0.0_0_1 , _UpperCAmelCase = 0.9_9 , _UpperCAmelCase = 0.2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : Tuple = num_channels
__a : str = image_size
__a : List[Any] = width_coefficient
__a : Optional[int] = depth_coefficient
__a : Union[str, Any] = depth_divisor
__a : int = kernel_sizes
__a : Dict = in_channels
__a : List[str] = out_channels
__a : Any = depthwise_padding
__a : str = strides
__a : Optional[Any] = num_block_repeats
__a : Optional[Any] = expand_ratios
__a : Any = squeeze_expansion_ratio
__a : int = hidden_act
__a : Union[str, Any] = hidden_dim
__a : Union[str, Any] = pooling_type
__a : Tuple = initializer_range
__a : List[str] = batch_norm_eps
__a : List[Any] = batch_norm_momentum
__a : Union[str, Any] = drop_connect_rate
__a : List[Any] = sum(_UpperCAmelCase ) * 4
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : Optional[Any] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align'''
__lowerCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=640 , _UpperCAmelCase=1.0 , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
if text_config is None:
__a : Dict = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
__a : Any = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
__a : Any = AlignTextConfig(**_UpperCAmelCase )
__a : Any = AlignVisionConfig(**_UpperCAmelCase )
__a : Optional[int] = projection_dim
__a : Union[str, Any] = temperature_init_value
__a : int = initializer_range
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = copy.deepcopy(self.__dict__ )
__a : Tuple = self.text_config.to_dict()
__a : Union[str, Any] = self.vision_config.to_dict()
__a : int = self.__class__.model_type
return output
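# Illustrative sketch (not from the original file): how the sub-config
# from_pretrained methods above peel a nested dict out of a parent "align"
# config. Plain dicts stand in for real config files.
_example_config_dict = {'''model_type''': '''align''', '''text_config''': {'''vocab_size''': 30522}}
if _example_config_dict.get('''model_type''') == "align":
    _example_config_dict = _example_config_dict['''text_config''']
print(_example_config_dict)  # -> {'vocab_size': 30522}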
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : List[Any] = tempfile.mkdtemp()
# fmt: off
__a : Optional[int] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__a : Dict = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__a : List[str] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__a : List[str] = {'''unk_token''': '''<unk>'''}
__a : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_UpperCAmelCase ) )
__a : Optional[Any] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__a : List[Any] = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self , **_UpperCAmelCase ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _lowerCamelCase ( self , **_UpperCAmelCase ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _lowerCamelCase ( self , **_UpperCAmelCase ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _lowerCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__a : Any = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self ):
__a : Any = self.get_tokenizer()
__a : Any = self.get_rust_tokenizer()
__a : Optional[Any] = self.get_image_processor()
__a : Optional[int] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
__a : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase )
__a : List[str] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
__a : Any = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : int = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__a : List[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__a : Optional[Any] = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__a : Tuple = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Any = self.get_image_processor()
__a : Union[str, Any] = self.get_tokenizer()
__a : Any = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : Tuple = self.prepare_image_inputs()
__a : Optional[Any] = image_processor(_UpperCAmelCase , return_tensors='''np''' )
__a : int = processor(images=_UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.get_image_processor()
__a : Optional[int] = self.get_tokenizer()
__a : Optional[Any] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : Dict = '''lower newer'''
__a : Union[str, Any] = processor(text=_UpperCAmelCase )
__a : Any = tokenizer(_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCamelCase ( self ):
__a : Any = self.get_image_processor()
__a : List[str] = self.get_tokenizer()
__a : str = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : List[Any] = '''lower newer'''
__a : Dict = self.prepare_image_inputs()
__a : List[str] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def _lowerCamelCase ( self ):
__a : str = self.get_image_processor()
__a : Optional[int] = self.get_tokenizer()
__a : List[str] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a : Union[str, Any] = processor.batch_decode(_UpperCAmelCase )
__a : Dict = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.get_image_processor()
__a : Any = self.get_tokenizer()
__a : str = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__a : Any = '''lower newer'''
__a : Optional[Any] = self.prepare_image_inputs()
__a : Any = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
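# Illustrative sketch (not from the original file): the dummy-image pattern used
# by prepare_image_inputs above -- random uint8 arrays in channels-first layout,
# converted to PIL images by moving the channel axis last. Requires numpy and PIL.
import numpy as np
from PIL import Image
_example_arrays = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
_example_images = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in _example_arrays]
print(_example_images[0].size)  # -> (400, 30): PIL reports (width, height)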
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst: list[int]) -> int:
    return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)
    # partition around the pivot in linear time; elements equal to the pivot are
    # dropped, so the input is assumed to contain distinct values
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, the pivot might already be the element we want.
    # we can easily see this:
    # small (elements smaller than the pivot)
    # + pivot (the kth element)
    # + big (elements larger than the pivot)
    if len(small) == k - 1:
        return pivot
    # the kth smallest element lies among the elements bigger than the pivot
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # the kth smallest element lies among the elements smaller than the pivot
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
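# Illustrative sketch (not from the original file): driving the quickselect
# defined above on a small list of distinct values; kth_number uses 1-based k.
print(kth_number([7, 1, 5, 9, 3], 3))  # third-smallest -> 5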
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = (DDPMScheduler,)
def _lowerCamelCase ( self , **_UpperCAmelCase ):
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_UpperCAmelCase )
return config
def _lowerCamelCase ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def _lowerCamelCase ( self ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def _lowerCamelCase ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Dict = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def _lowerCamelCase ( self ):
__a : int = self.scheduler_classes[0]
__a : int = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**_UpperCAmelCase )
__a : int = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[Any] = self.dummy_sample_deter
__a : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Optional[int] = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : List[Any] = pred_prev_sample
__a : int = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = self.scheduler_classes[0]
__a : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a : int = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[str] = self.dummy_sample_deter
__a : str = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Dict = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : Optional[int] = pred_prev_sample
__a : Optional[int] = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Any = self.get_scheduler_config()
__a : str = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
__a : List[Any] = scheduler.timesteps
for i, timestep in enumerate(_UpperCAmelCase ):
if i == len(_UpperCAmelCase ) - 1:
__a : Union[str, Any] = -1
else:
__a : str = timesteps[i + 1]
__a : Dict = scheduler.previous_timestep(_UpperCAmelCase )
__a : str = prev_t.item()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Optional[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(_UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
__a : Optional[int] = len(_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : List[str] = scheduler_class(**_UpperCAmelCase )
__a : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _UpperCAmelCase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
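# Illustrative sketch (not from the original file): the custom-timestep walk the
# tests above verify -- each timestep's predecessor is the next entry in the
# descending list, and the final timestep maps to -1.
_example_timesteps = [100, 87, 50, 1, 0]
for _i, _t in enumerate(_example_timesteps):
    _prev = _example_timesteps[_i + 1] if _i < len(_example_timesteps) - 1 else -1
    print(_t, '''->''', _prev)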
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
A = logging.getLogger(__name__)
def __A ( a_ :Union[str, Any] , a_ :Dict) -> Union[str, Any]:
__a : Optional[int] = np.argmax(a_ , axis=1)
return np.sum(outputs == labels)
def __A ( a_ :Any) -> str:
with open(a_ , encoding='''utf_8''') as f:
__a : List[Any] = csv.reader(a_)
__a : List[str] = []
next(a_) # skip the first line
for line in tqdm(a_):
output.append((''' '''.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
return output
def __A ( a_ :Dict , a_ :str , a_ :str , a_ :List[Any] , a_ :Tuple , a_ :List[Any]) -> Any:
__a : List[str] = []
for dataset in encoded_datasets:
__a : List[str] = len(a_)
__a : List[str] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa)
__a : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa)
__a : Tuple = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa)
__a : Optional[Any] = np.zeros((n_batch,) , dtype=np.intaa)
        for i, (story, conta, conta, mc_label) in enumerate(a_):
__a : str = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a : Tuple = with_conta
__a : int = with_conta
__a : List[str] = len(a_) - 1
__a : int = len(a_) - 1
__a : Optional[int] = with_conta
__a : Tuple = with_conta
__a : List[Any] = mc_label
__a : Any = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(a_) for t in all_inputs))
return tensor_datasets
def __A ( ) -> Union[str, Any]:
__a : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=a_ , default='''openai-gpt''' , help='''pretrained model name''')
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''')
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''')
parser.add_argument(
'''--output_dir''' , default=a_ , type=a_ , required=a_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=a_ , default='''''')
parser.add_argument('''--eval_dataset''' , type=a_ , default='''''')
parser.add_argument('''--seed''' , type=a_ , default=42)
parser.add_argument('''--num_train_epochs''' , type=a_ , default=3)
parser.add_argument('''--train_batch_size''' , type=a_ , default=8)
parser.add_argument('''--eval_batch_size''' , type=a_ , default=16)
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=a_ , help='''Epsilon for Adam optimizer.''')
parser.add_argument('''--max_grad_norm''' , type=a_ , default=1)
parser.add_argument(
'''--max_steps''' , default=-1 , type=a_ , help=(
            '''If > 0: set total number of training steps to perform. Overrides num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=a_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=a_ , default=6.25e-5)
parser.add_argument('''--warmup_steps''' , default=0 , type=a_ , help='''Linear warmup over warmup_steps.''')
parser.add_argument('''--lr_schedule''' , type=a_ , default='''warmup_linear''')
parser.add_argument('''--weight_decay''' , type=a_ , default=0.0_1)
parser.add_argument('''--lm_coef''' , type=a_ , default=0.9)
parser.add_argument('''--n_valid''' , type=a_ , default=3_74)
parser.add_argument('''--server_ip''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
parser.add_argument('''--server_port''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
__a : str = parser.parse_args()
print(a_)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=a_)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
__a : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
__a : str = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(a_ , a_))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__a : List[str] = ['''_start_''', '''_delimiter_''', '''_classify_''']
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name)
tokenizer.add_tokens(a_)
__a : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_)
__a : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
model.resize_token_embeddings(len(a_))
model.to(a_)
# Load and encode the datasets
def tokenize_and_encode(a_ :List[Any]):
if isinstance(a_ , a_):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(a_))
elif isinstance(a_ , a_):
return obj
return [tokenize_and_encode(a_) for o in obj]
logger.info('''Encoding dataset...''')
__a : Dict = load_rocstories_dataset(args.train_dataset)
__a : int = load_rocstories_dataset(args.eval_dataset)
__a : Optional[int] = (train_dataset, eval_dataset)
__a : List[Any] = tokenize_and_encode(a_)
# Compute the max input length for the Transformer
__a : List[Any] = model.config.n_positions // 2 - 2
__a : int = max(
len(story[:max_length]) + max(len(conta[:max_length]) , len(conta[:max_length])) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset)
__a : Union[str, Any] = min(a_ , model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__a : Tuple = pre_process_datasets(a_ , a_ , a_ , *a_)
__a , __a : Tuple = tensor_datasets[0], tensor_datasets[1]
__a : List[str] = TensorDataset(*a_)
__a : Optional[Any] = RandomSampler(a_)
__a : str = DataLoader(a_ , sampler=a_ , batch_size=args.train_batch_size)
__a : List[str] = TensorDataset(*a_)
__a : Optional[int] = SequentialSampler(a_)
__a : Optional[Any] = DataLoader(a_ , sampler=a_ , batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__a : int = args.max_steps
__a : Optional[int] = args.max_steps // (len(a_) // args.gradient_accumulation_steps) + 1
else:
__a : str = len(a_) // args.gradient_accumulation_steps * args.num_train_epochs
__a : List[Any] = list(model.named_parameters())
__a : Optional[int] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__a : List[str] = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], '''weight_decay''': 0.0},
]
__a : int = AdamW(a_ , lr=args.learning_rate , eps=args.adam_epsilon)
__a : Union[str, Any] = get_linear_schedule_with_warmup(
a_ , num_warmup_steps=args.warmup_steps , num_training_steps=a_)
if args.do_train:
__a , __a , __a : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs) , desc='''Epoch'''):
__a : Dict = 0
__a : Dict = 0
__a : List[str] = tqdm(a_ , desc='''Training''')
for step, batch in enumerate(a_):
__a : Dict = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : str = batch
__a : List[Any] = model(a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : Optional[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__a : int = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__a : Tuple = '''Training loss: {:.2e} lr: {:.2e}'''.format(a_ , scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__a : Dict = model.module if hasattr(a_ , '''module''') else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__a : int = os.path.join(args.output_dir , a_)
__a : str = os.path.join(args.output_dir , a_)
torch.save(model_to_save.state_dict() , a_)
model_to_save.config.to_json_file(a_)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
__a : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
model.to(a_)
if args.do_eval:
model.eval()
__a , __a : List[Any] = 0, 0
__a , __a : Union[str, Any] = 0, 0
for batch in tqdm(a_ , desc='''Evaluating'''):
__a : str = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : List[Any] = batch
with torch.no_grad():
__a , __a , __a , __a : str = model(
a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : List[str] = mc_logits.detach().cpu().numpy()
__a : Optional[Any] = mc_labels.to('''cpu''').numpy()
__a : str = accuracy(a_ , a_)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
__a : Tuple = eval_loss / nb_eval_steps
__a : List[str] = eval_accuracy / nb_eval_examples
__a : List[Any] = tr_loss / nb_tr_steps if args.do_train else None
__a : List[str] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__a : Dict = os.path.join(args.output_dir , '''eval_results.txt''')
with open(a_ , '''w''') as writer:
logger.info('''***** Eval results *****''')
for key in sorted(result.keys()):
logger.info(''' %s = %s''' , a_ , str(result[key]))
writer.write('''%s = %s\n''' % (key, str(result[key])))
if __name__ == "__main__":
main()
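# Illustrative sketch (not from the original file): the per-choice input layout
# assembled in pre_process_datasets above -- [start] story [delimiter]
# continuation [classify]. The token ids below are made up; a real run uses the
# ids of '_start_', '_delimiter_' and '_classify_'.
_example_start, _example_delim, _example_clf = 9001, 9002, 9003
_example_story, _example_cont = [11, 12, 13], [21, 22]
_example_input = [_example_start] + _example_story + [_example_delim] + _example_cont + [_example_clf]
print(_example_input)  # [9001, 11, 12, 13, 9002, 21, 22, 9003]
print(len(_example_input) - 1)  # position of the classify token (the mc_token_id)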
"""simple docstring"""
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        try:
            n = int(input('''Enter a number: ''').strip())
            print(partition(n))
        except ValueError:
            print('''Please enter a number.''')
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('''Please pass a number.''')
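# Illustrative sketch (not from the original file): a worked value for the
# partition function defined above. partition(5) counts the 7 ways to write 5
# as a sum of positive integers: 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.
print(partition(5))  # -> 7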
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=4 , ):
__a : Any = parent
__a : Optional[int] = batch_size
__a : str = seq_length
__a : List[str] = is_training
__a : Optional[Any] = use_attention_mask
__a : Optional[Any] = use_token_type_ids
__a : List[str] = use_labels
__a : Union[str, Any] = vocab_size
__a : int = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : Dict = intermediate_size
__a : List[str] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Union[str, Any] = attention_probs_dropout_prob
__a : int = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Optional[int] = type_sequence_label_size
__a : Optional[Any] = initializer_range
__a : Optional[int] = num_choices
def _lowerCamelCase ( self ):
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Union[str, Any] = None
if self.use_attention_mask:
__a : Any = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[int] = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Any = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self ):
__a : Dict = self.prepare_config_and_inputs()
__a , __a , __a , __a : str = config_and_inputs
__a : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self ):
__a : Any = self.prepare_config_and_inputs()
__a , __a , __a , __a : Union[str, Any] = config_and_inputs
__a : Optional[int] = True
__a : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self ):
__a : Dict = FlaxRobertaModelTester(self )
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__a : int = model_class_name.from_pretrained('''roberta-base''' , from_pt=_UpperCAmelCase )
__a : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
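# Illustrative sketch (not from the original file): the tensor shapes the tester
# above prepares, drawn here with numpy instead of the ids_tensor helpers.
import numpy as np
_example_batch, _example_seq, _example_vocab = 13, 7, 99
_example_ids = np.random.randint(0, _example_vocab, size=(_example_batch, _example_seq))
_example_mask = (np.random.rand(_example_batch, _example_seq) > 0.5).astype(np.int32)
print(_example_ids.shape, _example_mask.shape)  # (13, 7) (13, 7)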
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with OOM errors on GPU. Using the platform
    # allocator makes JAX allocate exactly what is needed on demand and deallocate
    # memory that is no longer needed, at the cost of speed, as described at
    # https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
A = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def __A ( a_ :Optional[int] , a_ :List[str] , a_ :Any=None , a_ :Tuple=None , a_ :int=None , a_ :Optional[Any]=None , a_ :int=None , a_ :str=None , ) -> Tuple:
if attention_mask is None:
__a : Dict = np.where(input_ids != config.pad_token_id , 1 , 0)
if decoder_attention_mask is None:
__a : Any = np.where(decoder_input_ids != config.pad_token_id , 1 , 0)
if head_mask is None:
__a : str = np.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
__a : str = np.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
__a : str = np.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=0.0_2 , ):
__a : Union[str, Any] = parent
__a : Optional[int] = batch_size
__a : Union[str, Any] = seq_length
__a : Tuple = is_training
__a : Union[str, Any] = use_labels
__a : Optional[int] = vocab_size
__a : Tuple = hidden_size
__a : Optional[int] = num_hidden_layers
__a : Tuple = num_attention_heads
__a : Any = intermediate_size
__a : str = hidden_act
__a : List[Any] = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : Tuple = max_position_embeddings
__a : str = eos_token_id
__a : Tuple = pad_token_id
__a : List[str] = bos_token_id
__a : Optional[int] = initializer_range
def _lowerCamelCase ( self ):
__a : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__a : int = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__a : int = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__a : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , )
__a : Tuple = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def _lowerCamelCase ( self ):
__a , __a : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = 20
__a : List[Any] = model_class_name(_UpperCAmelCase )
__a : int = model.encode(inputs_dict['''input_ids'''] )
__a , __a : List[str] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__a : str = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__a : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__a : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__a : int = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__a : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__a : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , )
__a : Optional[int] = model.decode(_UpperCAmelCase , _UpperCAmelCase )
__a : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = 20
__a : str = model_class_name(_UpperCAmelCase )
__a : Dict = model.encode(inputs_dict['''input_ids'''] )
__a , __a : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__a : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__a : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__a : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__a : Tuple = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__a : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__a : Tuple = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__a : Optional[Any] = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase )
__a : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = 99
def _lowerCamelCase ( self ):
__a : Optional[int] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__a : List[Any] = input_ids.shape[0]
__a : int = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _lowerCamelCase ( self ):
__a , __a , __a : List[str] = self._get_config_and_data()
__a : List[str] = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase )
__a : Tuple = lm_model(input_ids=_UpperCAmelCase )
__a : Any = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Dict = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__a : List[Any] = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase )
__a : Optional[int] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__a : Dict = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__a : Dict = lm_model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase )
__a : Dict = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__a : Union[str, Any] = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__a : Tuple = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
__a : Union[str, Any] = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_UpperCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class __lowercase ( _UpperCamelCase , unittest.TestCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
__lowerCAmelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def _lowerCamelCase ( self ):
__a : List[Any] = FlaxBlenderbotSmallModelTester(self )
def _lowerCamelCase ( self ):
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__a : Optional[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__a : int = model_class(_UpperCAmelCase )
@jax.jit
def encode_jitted(_UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ):
return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
with self.subTest('''JIT Enabled''' ):
__a : List[Any] = encode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__a : int = encode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self ):
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__a : Tuple = model_class(_UpperCAmelCase )
__a : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__a : Dict = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
return model.decode(
decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , )
with self.subTest('''JIT Enabled''' ):
__a : Any = decode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__a : Optional[int] = decode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__a : Tuple = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__a : Optional[Any] = np.ones((1, 1) ) * model.config.eos_token_id
__a : Tuple = model(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
| 52 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''levit'''
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=16 , _UpperCAmelCase=[128, 256, 384] , _UpperCAmelCase=[4, 8, 12] , _UpperCAmelCase=[4, 4, 4] , _UpperCAmelCase=[16, 16, 16] , _UpperCAmelCase=0 , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = image_size
__a : List[Any] = num_channels
__a : Dict = kernel_size
__a : Optional[int] = stride
__a : Optional[int] = padding
__a : Dict = hidden_sizes
__a : int = num_attention_heads
__a : Optional[int] = depths
__a : str = key_dim
__a : Union[str, Any] = drop_path_rate
__a : Optional[Any] = patch_size
__a : Tuple = attention_ratio
__a : int = mlp_ratio
__a : int = initializer_range
__a : int = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowerCamelCase ( self ):
return 1e-4
| 52 | 1 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
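# Hedged illustration (an assumption, not part of this script): a minimal
# sketch of the retry pattern that `find_executable_batch_size` implements --
# run the wrapped function and halve the batch size whenever it raises an
# out-of-memory error. The real decorator in `accelerate.utils` also frees
# cached CUDA memory and inspects the exception type more carefully.
def _halving_batch_size_sketch(starting_batch_size=16):
    def decorator(fn):
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as err:
                    if "out of memory" not in str(err):
                        raise
                    batch_size //= 2
            raise RuntimeError("No executable batch size found; reached zero.")
        return wrapper
    return decorator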
A = 16
A = 32
def __A ( accelerator :Accelerator , batch_size :int = 16) -> Any:
    __a : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''')
    __a : List[str] = load_dataset('''glue''' , '''mrpc''')
    def tokenize_function(examples :List[str]):
        # max_length=None => use the model max length (it's actually the default)
        __a : List[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        __a : str = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__a : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''')
    def collate_fn(examples :Dict):
# On TPU it's best to pad everything to the same length or training will be very slow.
__a : Tuple = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__a : Union[str, Any] = 16
elif accelerator.mixed_precision != "no":
__a : int = 8
else:
__a : Tuple = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=None , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
# Instantiate dataloaders.
    __a : Optional[Any] = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    __a : List[str] = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A = mocked_dataloaders # noqa: F811
def __A ( config :Union[str, Any] , args :str) -> Union[str, Any]:
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None) == "1":
__a : List[str] = 2
# Initialize accelerator
__a : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__a : Any = config['''lr''']
__a : Dict = int(config['''num_epochs'''])
__a : List[str] = int(config['''seed'''])
__a : Union[str, Any] = int(config['''batch_size'''])
__a : Tuple = evaluate.load('''glue''' , '''mrpc''')
# New Code #
    # We can now define an inner training loop function. It should take the batch size as its only parameter
    # and build the dataloaders inside it.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size :int):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
        __a : Tuple = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__a : str = model.to(accelerator.device)
# Instantiate optimizer
        __a : int = AdamW(params=model.parameters() , lr=lr)
        __a , __a : int = get_dataloaders(accelerator , batch_size)
        # Instantiate scheduler
        __a : Any = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=1_00 , num_training_steps=(len(train_dataloader) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
        __a , __a , __a , __a , __a : str = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)
# Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                __a : str = model(**batch)
                __a : Optional[Any] = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    __a : List[str] = model(**batch)
                __a : List[str] = outputs.logits.argmax(dim=-1)
                __a , __a : List[str] = accelerator.gather_for_metrics((predictions, batch['''labels''']))
                metric.add_batch(
                    predictions=predictions , references=references , )
__a : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , a_)
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __A ( ) -> str:
__a : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''')
parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''')
__a : Optional[Any] = parser.parse_args()
__a : int = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args)
if __name__ == "__main__":
main()
| 52 |
"""simple docstring"""
def __A ( set_a , set_b , alternative_union=False) -> float:
    if isinstance(set_a , set) and isinstance(set_b , set):
        __a : List[str] = len(set_a.intersection(set_b))
        if alternative_union:
            __a : List[str] = len(set_a) + len(set_b)
        else:
            __a : int = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a , (list, tuple)) and isinstance(set_b , (list, tuple)):
        __a : Union[str, Any] = [element for element in set_a if element in set_b]
        if alternative_union:
            __a : Union[str, Any] = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            __a : List[Any] = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
return None
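# Hedged worked example for the module-level sets below: {'a'..'e'} and
# {'c','d','e','f','h','i'} share 3 elements, and their union has 8, so the
# printed similarity is 3 / 8 = 0.375.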
if __name__ == "__main__":
A = {'''a''', '''b''', '''c''', '''d''', '''e'''}
A = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
| 52 | 1 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __A ( iterations :int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x :float , y :float) -> bool:
        __a : List[str] = sqrt((x**2) + (y**2))
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__a : Optional[Any] = mean(
int(is_in_circle(uniform(-1.0 , 1.0) , uniform(-1.0 , 1.0)))
        for _ in range(iterations))
# The ratio of the area for circle to square is pi/4.
__a : List[str] = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""")
print(F"""The numpy value of pi is {pi}""")
print(F"""The total error is {abs(pi - pi_estimate)}""")
def __A ( iterations :int , function_to_integrate :Callable[[float], float] , min_value :float = 0.0 , max_value :float = 1.0 , ) -> float:
    return mean(
        function_to_integrate(uniform(min_value , max_value)) for _ in range(iterations)) * (max_value - min_value)
def __A ( iterations :int , min_value :float = 0.0 , max_value :float = 1.0) -> None:
    def identity_function(x :float) -> float:
        return x
    __a : List[str] = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value)
__a : Tuple = (max_value * max_value - min_value * min_value) / 2
print('''******************''')
print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""")
print(F"""Estimated value is {estimated_value}""")
print(F"""Expected value is {expected_value}""")
print(F"""Total error is {abs(estimated_value - expected_value)}""")
print('''******************''')
def __A ( iterations :int) -> None:
    def function_to_integrate(x :float) -> float:
        return sqrt(4.0 - x * x)
    __a : Any = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0)
print('''******************''')
print('''Estimating pi using area_under_curve_estimator''')
print(F"""Estimated value is {estimated_value}""")
print(F"""Expected value is {pi}""")
print(F"""Total error is {abs(estimated_value - pi)}""")
print('''******************''')
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A = tuple[int, int]
class __lowercase :
'''simple docstring'''
    def __init__( self , vertices , edges ):
__a : set[int] = vertices
__a : dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
}
    def _lowerCamelCase ( self , edge , weight ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
__a : Dict = weight
def _lowerCamelCase ( self ):
__a : Graph = Graph({min(self.vertices )} , {} )
__a : EdgeT
__a : int
__a : EdgeT
__a : int
while len(subgraph.vertices ) < len(self.vertices ):
__a : Any = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
__a : List[str] = edge
__a : Optional[int] = weight
            subgraph.add_edge(min_edge , min_weight )
return subgraph
def __A ( a_ :str = "p107_network.txt") -> int:
__a : str = os.path.abspath(os.path.dirname(a_))
__a : str = os.path.join(a_ , a_)
__a : dict[EdgeT, int] = {}
__a : list[str]
__a : int
__a : int
    with open(network_file) as f:
__a : Optional[int] = f.read().strip().split('''\n''')
__a : Dict = [line.split(''',''') for line in data]
    for edgea in range(1 , len(adjaceny_matrix)):
        for edgeb in range(edgea):
            if adjaceny_matrix[edgea][edgeb] != "-":
                __a : Tuple = int(adjaceny_matrix[edgea][edgeb])
    __a : Graph = Graph(set(range(len(adjaceny_matrix))) , edges)
__a : Graph = graph.prims_algorithm()
__a : int = sum(graph.edges.values())
__a : int = sum(subgraph.edges.values())
return initial_total - optimal_total
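# Hedged mini-example: for vertices {0, 1, 2} with edges {(0, 1): 1, (1, 2): 2,
# (0, 2): 3}, Prim's algorithm keeps the weight-1 and weight-2 edges, so the
# saving over the full network is (1 + 2 + 3) - (1 + 2) = 3.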
if __name__ == "__main__":
print(F'{solution() = }')
| 52 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''dinat'''
__lowerCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _UpperCAmelCase=4 , _UpperCAmelCase=3 , _UpperCAmelCase=64 , _UpperCAmelCase=[3, 4, 6, 5] , _UpperCAmelCase=[2, 4, 8, 16] , _UpperCAmelCase=7 , _UpperCAmelCase=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , _UpperCAmelCase=3.0 , _UpperCAmelCase=True , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.0 , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : Union[str, Any] = patch_size
__a : Union[str, Any] = num_channels
__a : str = embed_dim
__a : Dict = depths
__a : Union[str, Any] = len(_UpperCAmelCase )
__a : List[Any] = num_heads
__a : List[str] = kernel_size
__a : List[Any] = dilations
__a : int = mlp_ratio
__a : Optional[int] = qkv_bias
__a : Optional[int] = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : Optional[Any] = drop_path_rate
__a : List[str] = hidden_act
__a : Union[str, Any] = layer_norm_eps
__a : Any = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__a : List[str] = int(embed_dim * 2 ** (len(_UpperCAmelCase ) - 1) )
__a : Optional[Any] = layer_scale_init_value
__a : Optional[Any] = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(_UpperCAmelCase ) + 1 )]
__a , __a : Any = get_aligned_output_features_output_indices(
out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''trocr'''
__lowerCAmelCase = ['''past_key_values''']
__lowerCAmelCase = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self , _UpperCAmelCase=50265 , _UpperCAmelCase=1024 , _UpperCAmelCase=12 , _UpperCAmelCase=16 , _UpperCAmelCase=4096 , _UpperCAmelCase="gelu" , _UpperCAmelCase=512 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , **_UpperCAmelCase , ):
__a : List[str] = vocab_size
__a : Optional[Any] = d_model
__a : Optional[Any] = decoder_layers
__a : Union[str, Any] = decoder_attention_heads
__a : int = decoder_ffn_dim
__a : List[Any] = activation_function
__a : Any = max_position_embeddings
__a : Dict = dropout
__a : List[Any] = attention_dropout
__a : Optional[Any] = activation_dropout
__a : str = init_std
__a : List[str] = decoder_layerdrop
__a : Union[str, Any] = use_cache
__a : Optional[Any] = scale_embedding
__a : List[Any] = use_learned_position_embeddings
__a : Optional[int] = layernorm_embedding
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
| 52 | 1 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you have network issues; you may try to debug them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
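#
# Hedged single-process smoke test (an assumption, not from the original notes
# above): setting the rendezvous env vars by hand lets `init_process_group`
# form a world of size 1 without any launcher:
#
# MASTER_ADDR=127.0.0.1 MASTER_PORT=29500 RANK=0 WORLD_SIZE=1 LOCAL_RANK=0 \
#     python torch-distributed-gpu-test.py
#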
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __A ( *msgs :Union[str, Any]) -> Union[str, Any]:
    with open(__file__ , '''r''') as fh:
        fcntl.flock(fh , fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN)
A = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
A = torch.device('''cuda''', local_rank)
A = socket.gethostname()
A = F'[{hostname}-{local_rank}]'
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
A = dist.get_rank()
A = dist.get_world_size()
printflock(F'{gpu} is OK (global rank: {rank}/{world_size})')
dist.barrier()
if rank == 0:
printflock(F'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
printflock(F'{gpu} is broken')
raise
| 52 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def __A ( masked_input :Union[str, Any] , model :Union[str, Any] , tokenizer :Optional[Any] , topk :Optional[int]=5) -> List[Any]:
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('''<mask>''') == 1
    __a : Optional[Any] = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    __a : Dict = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    __a : Tuple = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    __a : Any = logits[0, masked_index, :]
    __a : Any = logits.softmax(dim=0)
    __a , __a : Optional[Any] = prob.topk(k=topk , dim=0)
__a : Optional[int] = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(a_))])
__a : List[str] = tokenizer.mask_token
__a : Optional[int] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''')):
__a : Optional[Any] = predicted_token_bpe.replace('''\u2581''' , ''' ''')
if " {0}".format(a_) in masked_input:
topk_filled_outputs.append(
(
                    masked_input.replace(''' {0}'''.format(masked_token) , predicted_token),
values[index].item(),
predicted_token,
))
else:
topk_filled_outputs.append(
(
                    masked_input.replace(masked_token , predicted_token),
values[index].item(),
predicted_token,
))
return topk_filled_outputs
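# Hedged note on the return value: each entry of the list above is a
# (filled_sequence, probability, predicted_token) tuple, ordered by descending
# probability because `prob.topk` sorts its results.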
A = CamembertTokenizer.from_pretrained('''camembert-base''')
A = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
A = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 52 | 1 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __A ( model :BertModel , ckpt_dir :str , model_name :str) -> str:
__a : List[str] = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
__a : Any = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
__a : List[Any] = model.state_dict()
    def to_tf_var_name(name :str):
        for patt, repl in iter(var_map):
            __a : int = name.replace(patt , repl)
return F"""bert/{name}"""
    def create_tf_var(tensor :np.ndarray , name :str , session :tf.Session):
        __a : int = tf.dtypes.as_dtype(tensor.dtype)
        __a : Optional[int] = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
            __a : Any = to_tf_var_name(var_name)
            __a : Tuple = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                __a : List[Any] = torch_tensor.T
            __a : Optional[Any] = create_tf_var(tensor=torch_tensor , name=tf_name , session=session)
            tf.keras.backend.set_value(tf_var , torch_tensor)
            __a : int = session.run(tf_var)
            print(F"""Successfully created {tf_name}: {np.allclose(tf_tensor , torch_tensor)}""")
__a : Tuple = tf.train.Saver(tf.trainable_variables())
        saver.save(session , os.path.join(ckpt_dir , model_name.replace('''-''' , '''_''') + '''.ckpt'''))
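# Hedged CLI usage sketch (the script filename and paths are placeholders):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoints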
def __A ( raw_args :int=None) -> str:
__a : str = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=str , required=True , help='''model name e.g. bert-base-uncased''')
    parser.add_argument(
        '''--cache_dir''' , type=str , default=None , required=False , help='''Directory containing pytorch model''')
    parser.add_argument('''--pytorch_model_path''' , type=str , required=True , help='''/path/to/<pytorch-model-name>.bin''')
    parser.add_argument('''--tf_cache_dir''' , type=str , required=True , help='''Directory in which to save tensorflow model''')
    __a : Optional[Any] = parser.parse_args(raw_args)
    __a : Optional[Any] = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name)
if __name__ == "__main__":
main()
| 52 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Optional[int] = [10, 20, 30, 40, 50, 60]
__a : Union[str, Any] = [2, 4, 6, 8, 10, 12]
__a : List[str] = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
def _lowerCamelCase ( self ):
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
        self.assertRaisesRegex(ValueError , '''Weight can not be negative.''' )
def _lowerCamelCase ( self ):
        self.assertRaisesRegex(ValueError , '''Profit can not be negative.''' )
def _lowerCamelCase ( self ):
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
def _lowerCamelCase ( self ):
        self.assertRaisesRegex(
            ValueError , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
unittest.main()
| 52 | 1 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def __A ( a_ :Optional[Any]="ro" , a_ :List[str]="en" , a_ :str="wmt16" , a_ :str=None) -> None:
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''')
__a : int = F"""{src_lang}-{tgt_lang}"""
print(F"""Converting {dataset}-{pair}""")
    __a : Union[str, Any] = datasets.load_dataset(dataset , pair)
if save_dir is None:
__a : Union[str, Any] = F"""{dataset}-{pair}"""
    __a : List[Any] = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
for split in ds.keys():
print(F"""Splitting {split} with {ds[split].num_rows} records""")
# to save to val.source, val.target like summary datasets
__a : Optional[int] = '''val''' if split == '''validation''' else split
__a : Tuple = save_dir.joinpath(F"""{fn}.source""")
__a : int = save_dir.joinpath(F"""{fn}.target""")
__a : str = src_path.open('''w+''')
__a : Tuple = tgt_path.open('''w+''')
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split]):
__a : List[str] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''')
tgt_fp.write(ex[tgt_lang] + '''\n''')
print(F"""Saved {dataset} dataset to {save_dir}""")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''llama'''
__lowerCAmelCase = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=32000 , _UpperCAmelCase=4096 , _UpperCAmelCase=11008 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase="silu" , _UpperCAmelCase=2048 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , **_UpperCAmelCase , ):
__a : Dict = vocab_size
__a : Union[str, Any] = max_position_embeddings
__a : str = hidden_size
__a : List[str] = intermediate_size
__a : Any = num_hidden_layers
__a : int = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__a : Union[str, Any] = num_attention_heads
__a : Optional[int] = num_key_value_heads
__a : Dict = hidden_act
__a : Union[str, Any] = initializer_range
__a : int = rms_norm_eps
__a : Optional[int] = pretraining_tp
__a : Optional[Any] = use_cache
__a : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase , )
def _lowerCamelCase ( self ):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"""got {self.rope_scaling}""" )
__a : Tuple = self.rope_scaling.get('''type''' , _UpperCAmelCase )
__a : Optional[int] = self.rope_scaling.get('''factor''' , _UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
| 52 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = KandinskyVaaPipeline
__lowerCAmelCase = [
'''image_embeds''',
'''negative_image_embeds''',
]
__lowerCAmelCase = ['''image_embeds''', '''negative_image_embeds''']
__lowerCAmelCase = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__lowerCAmelCase = False
@property
def _lowerCamelCase ( self ):
return 32
@property
def _lowerCamelCase ( self ):
return 32
@property
def _lowerCamelCase ( self ):
return self.time_input_dim
@property
def _lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self ):
return 100
@property
def _lowerCamelCase ( self ):
torch.manual_seed(0 )
__a : Any = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__a : str = UNetaDConditionModel(**_UpperCAmelCase )
return model
@property
def _lowerCamelCase ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowerCamelCase ( self ):
torch.manual_seed(0 )
__a : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCamelCase ( self ):
__a : str = self.dummy_unet
__a : Optional[Any] = self.dummy_movq
__a : str = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_UpperCAmelCase , )
__a : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
__a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
__a : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith('''mps''' ):
__a : Any = torch.manual_seed(_UpperCAmelCase )
else:
__a : Optional[int] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__a : Optional[int] = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def _lowerCamelCase ( self ):
__a : str = '''cpu'''
__a : Any = self.get_dummy_components()
__a : List[Any] = self.pipeline_class(**_UpperCAmelCase )
__a : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : Optional[int] = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__a : Union[str, Any] = output.images
__a : List[str] = pipe(
**self.get_dummy_inputs(_UpperCAmelCase ) , return_dict=_UpperCAmelCase , )[0]
__a : Tuple = image[0, -3:, -3:, -1]
__a : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a : Union[str, Any] = np.array(
[0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ):
__a : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
__a : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_UpperCAmelCase )
__a : Any = KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
__a : Tuple = pipeline.to(_UpperCAmelCase )
pipeline.set_progress_bar_config(disable=_UpperCAmelCase )
__a : str = '''red cat, 4k photo'''
__a : List[str] = torch.Generator(device='''cuda''' ).manual_seed(0 )
__a , __a : Dict = pipe_prior(
_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__a : Optional[Any] = torch.Generator(device='''cuda''' ).manual_seed(0 )
__a : Optional[int] = pipeline(
image_embeds=_UpperCAmelCase , negative_image_embeds=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=100 , output_type='''np''' , )
__a : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 52 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__a : int = parent
__a : str = batch_size
__a : List[Any] = num_channels
__a : Union[str, Any] = image_size
__a : List[Any] = min_resolution
__a : str = max_resolution
__a : List[str] = do_resize
__a : Optional[int] = size if size is not None else {'''height''': 18, '''width''': 20}
__a : str = do_thumbnail
__a : str = do_align_axis
__a : Dict = do_pad
__a : Union[str, Any] = do_normalize
__a : List[str] = image_mean
__a : Optional[int] = image_std
def _lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = DonutImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : Tuple = DonutImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
def _lowerCamelCase ( self ):
__a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__a : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__a : int = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def _lowerCamelCase ( self ):
pass
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : int = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : str = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__a : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a : List[str] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 52 | 1 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
A = imread(r'''digital_image_processing/image_data/lena_small.jpg''')
A = cvtColor(img, COLOR_BGR2GRAY)
def __A ( ) -> Optional[int]:
__a : List[str] = cn.convert_to_negative(a_)
# assert negative_img array for at least one True
assert negative_img.any()
def __A ( ) -> List[Any]:
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
# Work around assertion for response
assert str(cc.change_contrast(a_ , 1_10)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def __A ( ) -> List[Any]:
__a : int = canny.gen_gaussian_kernel(9 , sigma=1.4)
# Assert ambiguous array
assert resp.all()
def __A ( ) -> Dict:
__a : Dict = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0)
# assert ambiguous array for all == True
assert canny_img.all()
__a : List[str] = canny.canny(a_)
# assert canny array for at least one True
assert canny_array.any()
def __A ( ) -> Dict:
assert gg.gaussian_filter(a_ , 5 , sigma=0.9).all()
def __A ( ) -> str:
# laplace diagonals
__a : Dict = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]])
    __a : Optional[int] = conv.img_convolve(gray , laplace).astype(uinta)
assert res.any()
def __A ( ) -> Dict:
assert med.median_filter(a_ , 3).any()
def __A ( ) -> Union[str, Any]:
__a , __a : List[Any] = sob.sobel_filter(a_)
assert grad.any() and theta.any()
def __A ( ) -> Tuple:
__a : Dict = sp.make_sepia(a_ , 20)
assert sepia.all()
def __A ( a_ :str = "digital_image_processing/image_data/lena_small.jpg") -> Tuple:
__a : Any = bs.Burkes(imread(a_ , 1) , 1_20)
burkes.process()
assert burkes.output_img.any()
def __A ( a_ :str = "digital_image_processing/image_data/lena_small.jpg" , ) -> str:
__a : List[Any] = rs.NearestNeighbour(imread(a_ , 1) , 4_00 , 2_00)
nn.process()
assert nn.output.any()
def __A ( ) -> List[Any]:
__a : List[Any] = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
__a : Union[str, Any] = imread(a_ , 0)
# Test for get_neighbors_pixel function() return not None
__a : Union[str, Any] = 0
__a : Union[str, Any] = 0
__a : List[Any] = image[x_coordinate][y_coordinate]
    __a : Optional[int] = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , image[x_coordinate][y_coordinate])
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__a : str = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0]):
for j in range(0 , image.shape[1]):
            __a : Dict = lbp.local_binary_value(image , i , j)
assert lbp_image.any()
| 52 |
"""simple docstring"""
from __future__ import annotations
def __A ( nums :list[int]) -> int:
if not nums:
return 0
__a : Any = nums[0]
__a : Optional[Any] = 0
for num in nums[1:]:
        __a , __a : Optional[Any] = (
            max_excluding + num,
            max(max_including , max_excluding),
        )
    return max(max_including , max_excluding)
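# Hedged worked example: for [2, 7, 9, 3, 1] the best non-adjacent picks are
# 2 + 9 + 1 = 12, which the running (max_including, max_excluding) pair above
# tracks in O(n) time and O(1) extra space.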
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 | 1 |
"""simple docstring"""
import string
from math import logaa
def __A ( term :str , document :str) -> int:
__a : Optional[int] = document.translate(
str.maketrans('''''' , '''''' , string.punctuation)).replace('''\n''' , '''''')
__a : int = document_without_punctuation.split(''' ''') # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()])
def __A ( term :str , corpus :str) -> tuple[int, int]:
__a : List[Any] = corpus.lower().translate(
str.maketrans('''''' , '''''' , string.punctuation)) # strip all punctuation and replace it with ''
__a : List[str] = corpus_without_punctuation.split('''\n''')
__a : Optional[int] = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def __A ( df :int , n :int , smoothing :bool=False) -> float:
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''')
return round(1 + logaa(n / (1 + df)) , 3)
if df == 0:
raise ZeroDivisionError('''df must be > 0''')
elif n == 0:
raise ValueError('''log10(0) is undefined.''')
return round(logaa(n / df) , 3)
def __A ( tf :int , idf :int) -> float:
return round(tf * idf , 3)
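# Hedged worked example: with a 10-document corpus where the term appears in 2
# documents, idf = round(log10(10 / 2), 3) = 0.699; a term frequency of 3 then
# gives tf-idf = round(3 * 0.699, 3) = 2.097.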
| 52 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A = '''▁'''
A = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = BigBirdTokenizer
__lowerCAmelCase = BigBirdTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
def _lowerCamelCase ( self ):
super().setUp()
__a : Dict = self.tokenizer_class(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self ):
__a : List[str] = '''<s>'''
__a : Tuple = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(_UpperCAmelCase ) , 1004 )
def _lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__a : Dict = self.get_tokenizer()
__a : Any = self.get_rust_tokenizer()
__a : int = '''I was born in 92000, and this is falsé.'''
__a : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
__a : List[str] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__a : Any = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Tuple = self.get_rust_tokenizer()
__a : Tuple = tokenizer.encode(_UpperCAmelCase )
__a : List[Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = BigBirdTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
__a : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
__a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a : Optional[Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def _lowerCamelCase ( self ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def _lowerCamelCase ( self ):
__a : str = '''Hello World!'''
__a : str = [65, 18536, 2260, 101, 66]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def _lowerCamelCase ( self ):
__a : Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
__a : Optional[Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@require_torch
@slow
def _lowerCamelCase ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__a : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__a : List[str] = ''' '''.join(_UpperCAmelCase )
__a : Tuple = self.big_tokenizer.encode_plus(_UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Any = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Optional[Any] = BigBirdConfig(attention_type='''original_full''' )
__a : Tuple = BigBirdModel(_UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCAmelCase )
model(**_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
__a : Union[str, Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
__a : List[Any] = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def _lowerCamelCase ( self ):
# fmt: off
__a : Optional[Any] = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 52 | 1 |
"""simple docstring"""
def __A ( num :int) -> str:
    if isinstance(num , float):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''')
    if isinstance(num , str):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''')
if num == 0:
return "0b0"
__a : Union[str, Any] = False
if num < 0:
__a : Union[str, Any] = True
__a : str = -num
__a : list[int] = []
while num > 0:
binary.insert(0 , num % 2)
num >>= 1
if negative:
return "-0b" + "".join(str(a_) for e in binary)
return "0b" + "".join(str(a_) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''convnextv2'''
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=224 , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : List[str] = num_channels
__a : str = patch_size
__a : Dict = num_stages
__a : List[str] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
__a : List[str] = [3, 3, 9, 3] if depths is None else depths
__a : List[Any] = hidden_act
__a : Any = initializer_range
__a : Optional[int] = layer_norm_eps
__a : List[Any] = drop_path_rate
__a : Any = image_size
__a : str = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__a , __a : Optional[int] = get_aligned_output_features_output_indices(
out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )
| 52 | 1 |
"""simple docstring"""
import math
import random
def __A ( a_ :float , a_ :bool = False) -> float:
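    # When deriv=True the input is assumed to already be a sigmoid output, so
    # value * (1 - value) is the derivative of the sigmoid at that point.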
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value))
# Initial Value
A = 0.02
def __A ( a_ :int , a_ :int) -> float:
__a : List[Any] = float(2 * (random.randint(1 , 1_00)) - 1)
for _ in range(a_):
# Forward propagation
__a : int = sigmoid_function(INITIAL_VALUE * weight)
# How much did we miss?
__a : int = (expected / 1_00) - layer_a
# Error delta
        __a : Dict = layer_1_error * sigmoid_function(layer_a , True)
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_00
if __name__ == "__main__":
import doctest
doctest.testmod()
A = int(input('''Expected value: '''))
A = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 52 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = (DDPMScheduler,)
def _lowerCamelCase ( self , **_UpperCAmelCase ):
__a : int = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_UpperCAmelCase )
return config
def _lowerCamelCase ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def _lowerCamelCase ( self ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def _lowerCamelCase ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Dict = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def _lowerCamelCase ( self ):
__a : int = self.scheduler_classes[0]
__a : int = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**_UpperCAmelCase )
__a : int = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[Any] = self.dummy_sample_deter
__a : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Optional[int] = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : List[Any] = pred_prev_sample
__a : int = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = self.scheduler_classes[0]
__a : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a : int = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[str] = self.dummy_sample_deter
__a : str = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Dict = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : Optional[int] = pred_prev_sample
__a : Optional[int] = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Any = self.get_scheduler_config()
__a : str = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
__a : List[Any] = scheduler.timesteps
for i, timestep in enumerate(_UpperCAmelCase ):
if i == len(_UpperCAmelCase ) - 1:
__a : Union[str, Any] = -1
else:
__a : str = timesteps[i + 1]
__a : Dict = scheduler.previous_timestep(_UpperCAmelCase )
__a : str = prev_t.item()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Optional[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(_UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
__a : Optional[int] = len(_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : List[str] = scheduler_class(**_UpperCAmelCase )
__a : List[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _UpperCAmelCase , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
| 52 | 1 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __A ( a_ :int , a_ :int , a_ :float = 1 / sqrt(2)) -> IIRFilter:
__a : str = tau * frequency / samplerate
__a : List[Any] = sin(a_)
__a : str = cos(a_)
__a : Tuple = _sin / (2 * q_factor)
__a : Optional[Any] = (1 - _cos) / 2
__a : int = 1 - _cos
__a : List[Any] = 1 + alpha
__a : str = -2 * _cos
__a : Tuple = 1 - alpha
__a : Optional[Any] = IIRFilter(2)
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba])
return filt
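# A usage sketch for the factory above (the name make_lowpass is an assumption,
# since every factory in this file shares the obfuscated name __A; IIRFilter from
# audio_filters.iir_filter is assumed to expose a per-sample process() method):
#
#   filt = make_lowpass(1_000, 48_000)  # 1 kHz low-pass at a 48 kHz sample rate
#   out = [filt.process(s) for s in samples]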
def __A ( a_ :int , a_ :int , a_ :float = 1 / sqrt(2)) -> IIRFilter:
__a : str = tau * frequency / samplerate
__a : Tuple = sin(a_)
__a : Optional[int] = cos(a_)
__a : Optional[int] = _sin / (2 * q_factor)
__a : Optional[Any] = (1 + _cos) / 2
__a : List[Any] = -1 - _cos
__a : Dict = 1 + alpha
__a : str = -2 * _cos
__a : List[str] = 1 - alpha
__a : Dict = IIRFilter(2)
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba])
return filt
def __A ( a_ :int , a_ :int , a_ :float = 1 / sqrt(2)) -> IIRFilter:
__a : Optional[int] = tau * frequency / samplerate
__a : List[Any] = sin(a_)
__a : Union[str, Any] = cos(a_)
__a : Tuple = _sin / (2 * q_factor)
__a : List[str] = _sin / 2
__a : List[str] = 0
__a : Optional[Any] = -ba
__a : Tuple = 1 + alpha
__a : Optional[int] = -2 * _cos
__a : Union[str, Any] = 1 - alpha
__a : Tuple = IIRFilter(2)
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba])
return filt
def __A ( a_ :int , a_ :int , a_ :float = 1 / sqrt(2)) -> IIRFilter:
__a : Dict = tau * frequency / samplerate
__a : Dict = sin(a_)
__a : Any = cos(a_)
__a : Union[str, Any] = _sin / (2 * q_factor)
__a : List[Any] = 1 - alpha
__a : str = -2 * _cos
__a : Union[str, Any] = 1 + alpha
__a : List[str] = IIRFilter(2)
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba])
return filt
def __A ( a_ :int , a_ :int , a_ :float , a_ :float = 1 / sqrt(2) , ) -> IIRFilter:
__a : Dict = tau * frequency / samplerate
__a : Optional[Any] = sin(a_)
__a : Union[str, Any] = cos(a_)
__a : str = _sin / (2 * q_factor)
__a : Optional[Any] = 10 ** (gain_db / 40)
__a : List[str] = 1 + alpha * big_a
__a : str = -2 * _cos
__a : Any = 1 - alpha * big_a
__a : Union[str, Any] = 1 + alpha / big_a
__a : Tuple = -2 * _cos
__a : str = 1 - alpha / big_a
__a : List[str] = IIRFilter(2)
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba])
return filt
def __A ( a_ :int , a_ :int , a_ :float , a_ :float = 1 / sqrt(2) , ) -> IIRFilter:
__a : Union[str, Any] = tau * frequency / samplerate
__a : Dict = sin(a_)
__a : str = cos(a_)
__a : List[Any] = _sin / (2 * q_factor)
__a : int = 10 ** (gain_db / 40)
__a : Optional[Any] = (big_a + 1) - (big_a - 1) * _cos
__a : int = (big_a + 1) + (big_a - 1) * _cos
__a : str = (big_a - 1) - (big_a + 1) * _cos
__a : List[Any] = (big_a - 1) + (big_a + 1) * _cos
__a : List[Any] = 2 * sqrt(a_) * alpha
__a : Dict = big_a * (pmc + aaa)
__a : Dict = 2 * big_a * mpc
__a : str = big_a * (pmc - aaa)
__a : Optional[Any] = ppmc + aaa
__a : List[str] = -2 * pmpc
__a : int = ppmc - aaa
__a : Tuple = IIRFilter(2)
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba])
return filt
def __A ( a_ :int , a_ :int , a_ :float , a_ :float = 1 / sqrt(2) , ) -> IIRFilter:
__a : int = tau * frequency / samplerate
__a : str = sin(a_)
__a : Dict = cos(a_)
__a : int = _sin / (2 * q_factor)
__a : Union[str, Any] = 10 ** (gain_db / 40)
__a : Optional[Any] = (big_a + 1) - (big_a - 1) * _cos
__a : List[str] = (big_a + 1) + (big_a - 1) * _cos
__a : List[Any] = (big_a - 1) - (big_a + 1) * _cos
__a : List[Any] = (big_a - 1) + (big_a + 1) * _cos
__a : Any = 2 * sqrt(a_) * alpha
__a : Optional[int] = big_a * (ppmc + aaa)
__a : List[str] = -2 * big_a * pmpc
__a : Union[str, Any] = big_a * (ppmc - aaa)
__a : List[Any] = pmc + aaa
__a : Optional[Any] = 2 * mpc
__a : Any = pmc - aaa
__a : Dict = IIRFilter(2)
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba])
return filt
| 52 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
A = random.Random()
def __A ( a_ :Tuple , a_ :Dict=1.0 , a_ :str=None , a_ :List[Any]=None) -> Dict:
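    # Builds a shape[0] x shape[1] nested list of random floats in [0, scale),
    # used throughout these tests as stand-in audio data.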
if rng is None:
__a : Any = global_rng
__a : Tuple = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=400 , _UpperCAmelCase=2000 , _UpperCAmelCase=2048 , _UpperCAmelCase=128 , _UpperCAmelCase=1 , _UpperCAmelCase=512 , _UpperCAmelCase=30 , _UpperCAmelCase=44100 , ):
__a : Any = parent
__a : Tuple = batch_size
__a : Tuple = min_seq_length
__a : List[str] = max_seq_length
__a : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a : Tuple = spectrogram_length
__a : int = feature_size
__a : int = num_audio_channels
__a : Tuple = hop_length
__a : List[Any] = chunk_length
__a : Any = sampling_rate
def _lowerCamelCase ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ):
def _flatten(_UpperCAmelCase ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
__a : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a : Optional[Any] = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = TvltFeatureExtractor
def _lowerCamelCase ( self ):
__a : Optional[Any] = TvltFeatureExtractionTester(self )
def _lowerCamelCase ( self ):
__a : int = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''hop_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : List[str] = feat_extract_first.save_pretrained(_UpperCAmelCase )[0]
check_json_file_has_correct_format(_UpperCAmelCase )
__a : Union[str, Any] = self.feature_extraction_class.from_pretrained(_UpperCAmelCase )
__a : Tuple = feat_extract_first.to_dict()
__a : List[Any] = feat_extract_second.to_dict()
__a : int = dict_first.pop('''mel_filters''' )
__a : List[Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : int = os.path.join(_UpperCAmelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(_UpperCAmelCase )
__a : Optional[Any] = self.feature_extraction_class.from_json_file(_UpperCAmelCase )
__a : Optional[Any] = feat_extract_first.to_dict()
__a : Any = feat_extract_second.to_dict()
__a : Optional[Any] = dict_first.pop('''mel_filters''' )
__a : Dict = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
# Initialize feature_extractor
__a : str = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__a : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__a : int = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__a : List[Any] = feature_extractor(
_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 , mask_audio=_UpperCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__a : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a : Any = np.asarray(_UpperCAmelCase )
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : int = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__a : int = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self ):
__a : List[str] = self._load_datasamples(1 )
__a : Tuple = TvltFeatureExtractor()
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__a : Dict = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _UpperCAmelCase , atol=1e-4 ) )
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ :list[int] , a_ :list[int] , a_ :list[int] , a_ :list[list[str]] , a_ :int , ) -> None:
__a : Tuple = len(a_)
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board])
return
# We iterate each column in the row to find all possible results in each row
for col in range(a_):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value; if it did, two
        # queens would collide vertically. Then we apply the two formulas we learned
        # before:
        #
        # 45º:  y - x = b   or   row - col = b
        # 135º: y + x = b   or   row + col = b
        #
        # and verify that the results of these two formulas do not already exist in
        # their respective collision sets (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is true there is a collision, so we continue to the
        # next value in the for loop.
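        #
        # Worked example: with possible_board = [1, 3] (queens at (0, 1) and (1, 3)),
        # diagonal_right_collisions = [-1, -2] and diagonal_left_collisions = [1, 4].
        # Trying col = 2 at row = 2: 2 not in [1, 3] and 2 - 2 = 0 is safe, but
        # 2 + 2 = 4 is already in diagonal_left_collisions, so this column is skipped.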
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If there is no collision we call the dfs function again with updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , a_ , a_ , )
def __A ( a_ :int) -> None:
__a : list[list[str]] = []
depth_first_search([] , [] , [] , a_ , a_)
# Print all the boards
for board in boards:
for column in board:
print(a_)
print('''''')
print(len(a_) , '''solutions were found.''')
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 52 |
"""simple docstring"""
from __future__ import annotations
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a , __a : List[Any] = text, pattern
__a , __a : Tuple = len(_UpperCAmelCase ), len(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
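        # Bad-character heuristic: return the rightmost index of `char` in the
        # pattern, or -1 if the pattern does not contain it.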
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def _lowerCamelCase ( self , _UpperCAmelCase ):
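        # Return the text position of the rightmost mismatch between the pattern
        # and the text window starting at current_pos, or -1 on a full match.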
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def _lowerCamelCase ( self ):
# searches pattern in text and returns index positions
__a : Dict = []
for i in range(self.textLen - self.patLen + 1 ):
__a : List[str] = self.mismatch_in_text(_UpperCAmelCase )
if mismatch_index == -1:
positions.append(_UpperCAmelCase )
else:
__a : Tuple = self.match_in_pattern(self.text[mismatch_index] )
__a : Optional[int] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
A = '''ABAABA'''
A = '''AB'''
A = BoyerMooreSearch(text, pattern)
A = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
| 52 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 52 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
A = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
A = F'https://www.google.com/search?q={query}&num=100'
A = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
A = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
A = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 52 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
A = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def __A ( a_ :Dict , a_ :Union[str, Any] , a_ :List[Any] , a_ :Dict=None) -> Any:
# Initialise PyTorch model
__a : Union[str, Any] = XLNetConfig.from_json_file(a_)
__a : List[Any] = finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""")
__a : Optional[Any] = finetuning_task
__a : Tuple = GLUE_TASKS_NUM_LABELS[finetuning_task]
__a : Any = XLNetForSequenceClassification(a_)
elif "squad" in finetuning_task:
__a : Union[str, Any] = finetuning_task
__a : Union[str, Any] = XLNetForQuestionAnswering(a_)
else:
__a : int = XLNetLMHeadModel(a_)
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(a_ , a_ , a_)
# Save pytorch-model
__a : int = os.path.join(a_ , a_)
__a : str = os.path.join(a_ , a_)
print(F"""Save PyTorch model to {os.path.abspath(a_)}""")
torch.save(model.state_dict() , a_)
print(F"""Save configuration file to {os.path.abspath(a_)}""")
with open(a_ , '''w''' , encoding='''utf-8''') as f:
f.write(config.to_json_string())
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
A = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 52 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = False
__lowerCAmelCase = 3.0
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_UpperCAmelCase ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.2_5 ).to_kwargs() , {'''a''': 2, '''c''': 2.2_5} )
@require_cuda
def _lowerCamelCase ( self ):
        # Non-default `GradScalerKwargs` values should be passed through to the created scaler.
__a : List[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
__a : int = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__a : Optional[Any] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _UpperCAmelCase )
@require_multi_gpu
def _lowerCamelCase ( self ):
__a : Dict = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
A = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
A = Accelerator(kwargs_handlers=[ddp_scaler])
A = torch.nn.Linear(100, 200)
A = accelerator.prepare(model)
# Check the values changed in kwargs
A = ''''''
A = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class __lowercase :
'''simple docstring'''
__lowerCAmelCase = PegasusConfig
__lowerCAmelCase = {}
__lowerCAmelCase = '''gelu'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=40 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ):
__a : Union[str, Any] = parent
__a : Tuple = batch_size
__a : Tuple = seq_length
__a : List[Any] = is_training
__a : Optional[int] = use_labels
__a : int = vocab_size
__a : Optional[Any] = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : Union[str, Any] = intermediate_size
__a : Tuple = hidden_dropout_prob
__a : int = attention_probs_dropout_prob
__a : Tuple = max_position_embeddings
__a : List[Any] = eos_token_id
__a : Optional[int] = pad_token_id
__a : Optional[int] = bos_token_id
def _lowerCamelCase ( self ):
__a : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__a : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__a : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Optional[int] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__a : Optional[Any] = prepare_pegasus_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : Dict = TFPegasusModel(config=_UpperCAmelCase ).get_decoder()
__a : int = inputs_dict['''input_ids''']
__a : List[str] = input_ids[:1, :]
__a : List[str] = inputs_dict['''attention_mask'''][:1, :]
__a : Optional[int] = inputs_dict['''head_mask''']
__a : List[Any] = 1
# first forward pass
__a : Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
__a , __a : Tuple = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a : int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__a : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
__a : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__a : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
__a : Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__a : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__a : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
__a : str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-3 )
def __A ( a_ :List[Any] , a_ :str , a_ :int , a_ :Union[str, Any]=None , a_ :Optional[Any]=None , a_ :Union[str, Any]=None , a_ :List[Any]=None , a_ :List[str]=None , ) -> Dict:
if attention_mask is None:
__a : Dict = tf.cast(tf.math.not_equal(a_ , config.pad_token_id) , tf.inta)
if decoder_attention_mask is None:
__a : Optional[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta),
] , axis=-1 , )
if head_mask is None:
__a : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
__a : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
__a : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__lowerCAmelCase = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__lowerCAmelCase = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = False
def _lowerCamelCase ( self ):
__a : List[str] = TFPegasusModelTester(self )
__a : int = ConfigTester(self , config_class=_UpperCAmelCase )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
__a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
__lowerCAmelCase = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__lowerCAmelCase = '''google/pegasus-xsum'''
@cached_property
def _lowerCamelCase ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _lowerCamelCase ( self ):
__a : Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _lowerCamelCase ( self , **_UpperCAmelCase ):
__a : str = self.translate_src_text(**_UpperCAmelCase )
assert self.expected_text == generated_words
def _lowerCamelCase ( self , **_UpperCAmelCase ):
__a : List[Any] = self.tokenizer(self.src_text , **_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='''tf''' )
__a : Tuple = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_UpperCAmelCase , )
__a : Any = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_UpperCAmelCase )
return generated_words
@slow
def _lowerCamelCase ( self ):
self._assert_generated_batch_equal_expected()
| 52 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def __A ( a_ :Tuple) -> List[str]:
return choice(a_)
def __A ( a_ :list[int] , a_ :int) -> int:
__a : Optional[int] = random_pivot(a_)
# partition based on pivot
# linear time
__a : Union[str, Any] = [e for e in lst if e < pivot]
__a : Any = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(a_) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(a_) < k - 1:
return kth_number(a_ , k - len(a_) - 1)
# pivot is in elements smaller than k
else:
return kth_number(a_ , a_)
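# Worked example (distinct elements; values equal to the pivot other than the
# pivot itself are dropped by the partition above):
#   kth_number([2, 1, 3, 4, 5], 3) -> 3, the 3rd smallest element.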
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
A = get_logger(__name__)
A = Path(__file__).parent / '''model_card_template.md'''
A = uuida().hex
A = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
A = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def __A ( a_ :Union[Dict, str, None] = None) -> str:
__a : Union[str, Any] = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''').upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(a_ , a_):
ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items())
elif isinstance(a_ , a_):
ua += "; " + user_agent
return ua
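# Example of a resulting user-agent string (version numbers illustrative):
#   "diffusers/0.18.0; python/3.10.12; session_id/0123abcd; torch/2.0.1"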
def __A ( a_ :str , a_ :Optional[str] = None , a_ :Optional[str] = None) -> Optional[int]:
if token is None:
__a : Any = HfFolder.get_token()
if organization is None:
__a : List[Any] = whoami(a_)['''name''']
return F"""{username}/{model_id}"""
else:
return F"""{organization}/{model_id}"""
def __A ( a_ :Union[str, Any] , a_ :List[str]) -> Optional[Any]:
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''')
if hasattr(a_ , '''local_rank''') and args.local_rank not in [-1, 0]:
return
__a : int = args.hub_token if hasattr(a_ , '''hub_token''') else None
__a : Any = get_full_repo_name(a_ , token=a_)
__a : Tuple = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=a_ , model_name=a_ , repo_name=a_ , dataset_name=args.dataset_name if hasattr(a_ , '''dataset_name''') else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(a_ , '''gradient_accumulation_steps''') else None
) , adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta1''') else None , adam_betaa=args.adam_betaa if hasattr(a_ , '''adam_beta2''') else None , adam_weight_decay=args.adam_weight_decay if hasattr(a_ , '''adam_weight_decay''') else None , adam_epsilon=args.adam_epsilon if hasattr(a_ , '''adam_epsilon''') else None , lr_scheduler=args.lr_scheduler if hasattr(a_ , '''lr_scheduler''') else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(a_ , '''lr_warmup_steps''') else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(a_ , '''ema_inv_gamma''') else None , ema_power=args.ema_power if hasattr(a_ , '''ema_power''') else None , ema_max_decay=args.ema_max_decay if hasattr(a_ , '''ema_max_decay''') else None , mixed_precision=args.mixed_precision , )
__a : List[Any] = os.path.join(args.output_dir , '''README.md''')
model_card.save(a_)
def __A ( a_ :Optional[str] , a_ :Optional[str] = None) -> Union[str, Any]:
if resolved_file is None or commit_hash is not None:
return commit_hash
__a : Any = str(Path(a_).as_posix())
__a : Optional[int] = re.search(R'''snapshots/([^/]+)/''' , a_)
if search is None:
return None
__a : Dict = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(a_) else None
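# Example: a resolved file path like ".../snapshots/abc123/config.json" yields
# "abc123", which is returned only if it matches REGEX_COMMIT_HASH.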
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
A = os.path.expanduser(
os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
A = os.path.join(hf_cache_home, '''diffusers''')
def __A ( a_ :Optional[str] = None , a_ :Optional[str] = None) -> None:
if new_cache_dir is None:
__a : Dict = DIFFUSERS_CACHE
if old_cache_dir is None:
__a : List[Any] = old_diffusers_cache
__a : Union[str, Any] = Path(a_).expanduser()
__a : Dict = Path(a_).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*'''):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
__a : List[Any] = new_cache_dir / old_blob_path.relative_to(a_)
new_blob_path.parent.mkdir(parents=a_ , exist_ok=a_)
os.replace(a_ , a_)
try:
os.symlink(a_ , a_)
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
A = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
A = 0
else:
with open(cache_version_file) as f:
try:
A = int(f.read())
except ValueError:
A = 0
if cache_version < 1:
A = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
A = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def __A ( a_ :str , a_ :Optional[str] = None) -> str:
if variant is not None:
__a : Dict = weights_name.split('''.''')
__a : List[Any] = splits[:-1] + [variant] + splits[-1:]
__a : Tuple = '''.'''.join(a_)
return weights_name
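# Example for the helper above (called _add_variant at its use sites below):
#   _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"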
def __A ( a_ :List[Any] , * , a_ :Union[str, Any] , a_ :Dict , a_ :Union[str, Any] , a_ :Optional[int] , a_ :str , a_ :Any , a_ :str , a_ :Optional[int] , a_ :str , a_ :Tuple , a_ :List[str]=None , ) -> Dict:
__a : int = str(a_)
if os.path.isfile(a_):
return pretrained_model_name_or_path
elif os.path.isdir(a_):
if os.path.isfile(os.path.join(a_ , a_)):
# Load from a PyTorch checkpoint
__a : Union[str, Any] = os.path.join(a_ , a_)
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(a_ , a_ , a_)):
__a : Optional[Any] = os.path.join(a_ , a_ , a_)
return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""")
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(a_).base_version) >= version.parse('''0.20.0''')
):
try:
__a : Any = hf_hub_download(
a_ , filename=_add_variant(a_ , a_) , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , user_agent=a_ , subfolder=a_ , revision=revision or commit_hash , )
warnings.warn(
F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , a_ , )
return model_file
except: # noqa: E722
warnings.warn(
F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(a_ , a_)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(a_ , a_)}' so that the correct variant file can be added.""" , a_ , )
try:
# 2. Load model file as usual
__a : Optional[Any] = hf_hub_download(
a_ , filename=a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , use_auth_token=a_ , user_agent=a_ , subfolder=a_ , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''')
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""")
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""")
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""")
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
                ''' \nCheck your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''')
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""")
| 52 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=[10, 20, 30, 40] , _UpperCAmelCase=[2, 2, 3, 2] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=10 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=["stage2", "stage3", "stage4"] , _UpperCAmelCase=[2, 3, 4] , _UpperCAmelCase=None , ):
__a : Union[str, Any] = parent
__a : str = batch_size
__a : Union[str, Any] = image_size
__a : List[str] = num_channels
__a : Optional[int] = num_stages
__a : Optional[int] = hidden_sizes
__a : Dict = depths
__a : Optional[int] = is_training
__a : List[str] = use_labels
__a : Optional[Any] = intermediate_size
__a : Optional[Any] = hidden_act
__a : str = num_labels
__a : Optional[int] = initializer_range
__a : Dict = out_features
__a : List[Any] = out_indices
__a : str = scope
def _lowerCamelCase ( self ):
__a : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Any = None
if self.use_labels:
__a : str = ids_tensor([self.batch_size] , self.num_labels )
__a : Tuple = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = ConvNextVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Union[str, Any] = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = ConvNextVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : List[str] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : List[Any] = model(_UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__a : Any = None
__a : Optional[Any] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Any = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
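        # Convention sketch (values from this tester): out_features selects which
        # stages the backbone returns; with out_features=None it falls back to the
        # last stage only, hence the single final-stage feature map checked above.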
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.prepare_config_and_inputs()
__a , __a , __a : Any = config_and_inputs
__a : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.prepare_config_and_inputs()
__a , __a , __a : int = config_and_inputs
__a : Union[str, Any] = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class __lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
{'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def _lowerCamelCase ( self ):
__a : Any = ConvNextVaModelTester(self )
__a : str = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def _lowerCamelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self ):
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_with_labels()
__a : List[str] = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
continue
__a : Dict = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
__a : Optional[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
__a : Dict = model(**_UpperCAmelCase ).loss
loss.backward()
def _lowerCamelCase ( self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__a : Dict = False
__a : Optional[Any] = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
continue
__a : int = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
__a : List[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
__a : List[str] = model(**_UpperCAmelCase ).loss
loss.backward()
def _lowerCamelCase ( self ):
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Tuple = model_class(_UpperCAmelCase )
__a : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Union[str, Any] = [*signature.parameters.keys()]
__a : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__a : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : Dict = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : str = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self ):
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self ):
__a : Optional[Any] = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(_UpperCAmelCase )
__a : str = self.default_image_processor
__a : Optional[Any] = prepare_img()
__a : Optional[Any] = preprocessor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__a : List[str] = model(**_UpperCAmelCase )
# verify the logits
__a : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__a : Optional[Any] = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 52 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
A = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class AlignTextConfig(PretrainedConfig ):
'''simple docstring'''
    model_type = '''align_text_model'''
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = vocab_size
__a : Optional[int] = hidden_size
__a : Dict = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : Optional[int] = hidden_act
__a : List[Any] = intermediate_size
__a : List[Any] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : List[str] = type_vocab_size
__a : Tuple = initializer_range
__a : Dict = layer_norm_eps
__a : Any = position_embedding_type
__a : Dict = use_cache
__a : Dict = pad_token_id
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : List[str] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Dict = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class AlignVisionConfig(PretrainedConfig ):
'''simple docstring'''
    model_type = '''align_vision_model'''
def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 600 , _UpperCAmelCase = 2.0 , _UpperCAmelCase = 3.1 , _UpperCAmelCase = 8 , _UpperCAmelCase = [3, 3, 5, 3, 5, 5, 3] , _UpperCAmelCase = [32, 16, 24, 40, 80, 112, 192] , _UpperCAmelCase = [16, 24, 40, 80, 112, 192, 320] , _UpperCAmelCase = [] , _UpperCAmelCase = [1, 2, 2, 2, 1, 2, 1] , _UpperCAmelCase = [1, 2, 2, 3, 3, 4, 1] , _UpperCAmelCase = [1, 6, 6, 6, 6, 6, 6] , _UpperCAmelCase = 0.2_5 , _UpperCAmelCase = "swish" , _UpperCAmelCase = 2560 , _UpperCAmelCase = "mean" , _UpperCAmelCase = 0.0_2 , _UpperCAmelCase = 0.0_0_1 , _UpperCAmelCase = 0.9_9 , _UpperCAmelCase = 0.2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : Tuple = num_channels
__a : str = image_size
__a : List[Any] = width_coefficient
__a : Optional[int] = depth_coefficient
__a : Union[str, Any] = depth_divisor
__a : int = kernel_sizes
__a : Dict = in_channels
__a : List[str] = out_channels
__a : Any = depthwise_padding
__a : str = strides
__a : Optional[Any] = num_block_repeats
__a : Optional[Any] = expand_ratios
__a : Any = squeeze_expansion_ratio
__a : int = hidden_act
__a : Union[str, Any] = hidden_dim
__a : Union[str, Any] = pooling_type
__a : Tuple = initializer_range
__a : List[str] = batch_norm_eps
__a : List[Any] = batch_norm_momentum
__a : Union[str, Any] = drop_connect_rate
__a : List[Any] = sum(_UpperCAmelCase ) * 4
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : Optional[Any] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class AlignConfig(PretrainedConfig ):
'''simple docstring'''
    model_type = '''align'''
__lowerCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=640 , _UpperCAmelCase=1.0 , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
if text_config is None:
__a : Dict = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
__a : Any = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
__a : Any = AlignTextConfig(**_UpperCAmelCase )
__a : Any = AlignVisionConfig(**_UpperCAmelCase )
__a : Optional[int] = projection_dim
__a : Union[str, Any] = temperature_init_value
__a : int = initializer_range
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = copy.deepcopy(self.__dict__ )
__a : Tuple = self.text_config.to_dict()
__a : Union[str, Any] = self.vision_config.to_dict()
__a : int = self.__class__.model_type
return output
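# Composition sketch (upstream method name assumed, since this dump anonymizes
# it to _lowerCamelCase): the joint config is built from its two sub-configs:
#   config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())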
| 52 | 1 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    '''simple docstring'''

    def __init__(self, data):
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6a09_e667,
0xbb67_ae85,
0x3c6e_f372,
0xa54f_f53a,
0x510e_527f,
0x9b05_688c,
0x1f83_d9ab,
0x5be0_cd19,
]
# Initialize round constants
        self.round_constants = [
0x428a_2f98,
0x7137_4491,
0xb5c0_fbcf,
0xe9b5_dba5,
0x3956_c25b,
0x59f1_11f1,
0x923f_82a4,
0xab1c_5ed5,
0xd807_aa98,
0x1283_5b01,
0x2431_85be,
0x550c_7dc3,
0x72be_5d74,
0x80de_b1fe,
0x9bdc_06a7,
0xc19b_f174,
0xe49b_69c1,
0xefbe_4786,
0x0fc1_9dc6,
0x240c_a1cc,
0x2de9_2c6f,
0x4a74_84aa,
0x5cb0_a9dc,
0x76f9_88da,
0x983e_5152,
0xa831_c66d,
0xb003_27c8,
0xbf59_7fc7,
0xc6e0_0bf3,
0xd5a7_9147,
0x06ca_6351,
0x1429_2967,
0x27b7_0a85,
0x2e1b_2138,
0x4d2c_6dfc,
0x5338_0d13,
0x650a_7354,
0x766a_0abb,
0x81c2_c92e,
0x9272_2c85,
0xa2bf_e8a1,
0xa81a_664b,
0xc24b_8b70,
0xc76c_51a3,
0xd192_e819,
0xd699_0624,
0xf40e_3585,
0x106a_a070,
0x19a4_c116,
0x1e37_6c08,
0x2748_774c,
0x34b0_bcb5,
0x391c_0cb3,
0x4ed8_aa4a,
0x5b9c_ca4f,
0x682e_6ff3,
0x748f_82ee,
0x78a5_636f,
0x84c8_7814,
0x8cc7_0208,
0x90be_fffa,
0xa450_6ceb,
0xbef9_a3f7,
0xc671_78f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
@staticmethod
    def preprocessing(data):
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
# add 48 0-ed integers
words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
# Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xffff_ffff) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value, rotations):
        return 0xffff_ffff & (value << (32 - rotations)) | (value >> rotations)
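# Quick usage sketch (assumed input): both lines below print the same
# 64-character hex digest.
#   print(SHA256(b'hello world').hash)
#   print(__import__('hashlib').sha256(b'hello world').hexdigest())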
class SHA256HashTest(unittest.TestCase):
    '''simple docstring'''

    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 52 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
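# Minimal usage sketch (hypothetical helper, not part of the original module).
# Note the partition above drops elements equal to the pivot, so kth_number
# assumes distinct inputs.
def _demo_kth_number() -> None:
    print(kth_number([3, 1, 4, 5, 9, 2, 6], 3))  # 3rd smallest -> 3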
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=True , _UpperCAmelCase=1 / 255 , _UpperCAmelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a : Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
__a : List[str] = parent
__a : str = batch_size
__a : Any = num_channels
__a : List[str] = min_resolution
__a : str = max_resolution
__a : Optional[int] = do_resize
__a : Dict = size
__a : Union[str, Any] = do_normalize
__a : Tuple = image_mean
__a : Dict = image_std
__a : str = do_rescale
__a : Optional[int] = rescale_factor
__a : Tuple = do_pad
def _lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
if not batched:
__a : Dict = image_inputs[0]
if isinstance(_UpperCAmelCase , Image.Image ):
__a , __a : Union[str, Any] = image.size
else:
__a , __a : Any = image.shape[1], image.shape[2]
if w < h:
__a : List[str] = int(self.size['''shortest_edge'''] * h / w )
__a : int = self.size['''shortest_edge''']
elif w > h:
__a : Dict = self.size['''shortest_edge''']
__a : int = int(self.size['''shortest_edge'''] * w / h )
else:
__a : str = self.size['''shortest_edge''']
__a : str = self.size['''shortest_edge''']
else:
__a : Optional[int] = []
for image in image_inputs:
__a , __a : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a : Optional[int] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0]
__a : str = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
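    # Resize-rule sketch (assumed numbers): with shortest_edge=18, a 30x60 image
    # maps to 18x36 -- the short side is pinned and the aspect ratio kept; for
    # batched inputs, the per-image expected sizes are reduced with max() above.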
@require_torch
@require_vision
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = YolosImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
__a : Optional[Any] = YolosImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
__a : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''size''' ) )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , _UpperCAmelCase )
__a : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCAmelCase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , _UpperCAmelCase )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__a : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__a , __a : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
__a : List[Any] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__a : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : Tuple = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self ):
# Initialize image_processing
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__a , __a : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : Any = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
__a , __a : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self ):
# Initialize image_processings
__a : Dict = self.image_processing_class(**self.image_processor_dict )
__a : Any = self.image_processing_class(do_resize=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_rescale=_UpperCAmelCase )
# create random PyTorch tensors
__a : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
__a : List[str] = image_processing_a.pad(_UpperCAmelCase , return_tensors='''pt''' )
__a : Dict = image_processing_a(_UpperCAmelCase , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def _lowerCamelCase ( self ):
# prepare image and target
__a : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
__a : Optional[Any] = json.loads(f.read() )
__a : Optional[Any] = {'''image_id''': 39769, '''annotations''': target}
# encode them
__a : Any = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
__a : List[Any] = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _UpperCAmelCase )
__a : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4 ) )
# verify area
__a : Union[str, Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _UpperCAmelCase ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _UpperCAmelCase )
__a : int = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _UpperCAmelCase , atol=1e-3 ) )
# verify image_id
__a : Union[str, Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _UpperCAmelCase ) )
# verify is_crowd
__a : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _UpperCAmelCase ) )
# verify class_labels
__a : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _UpperCAmelCase ) )
# verify orig_size
__a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _UpperCAmelCase ) )
# verify size
__a : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _UpperCAmelCase ) )
@slow
def _lowerCamelCase ( self ):
# prepare image, target and masks_path
__a : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__a : Optional[Any] = json.loads(f.read() )
__a : int = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
__a : str = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__a : List[str] = YolosImageProcessor(format='''coco_panoptic''' )
__a : Tuple = image_processing(images=_UpperCAmelCase , annotations=_UpperCAmelCase , masks_path=_UpperCAmelCase , return_tensors='''pt''' )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , _UpperCAmelCase )
__a : Dict = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _UpperCAmelCase , atol=1e-4 ) )
# verify area
__a : List[str] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _UpperCAmelCase ) )
# verify boxes
__a : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _UpperCAmelCase )
__a : Dict = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _UpperCAmelCase , atol=1e-3 ) )
# verify image_id
__a : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _UpperCAmelCase ) )
# verify is_crowd
__a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _UpperCAmelCase ) )
# verify class_labels
__a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _UpperCAmelCase ) )
# verify masks
__a : Any = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _UpperCAmelCase )
# verify orig_size
__a : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _UpperCAmelCase ) )
# verify size
__a : int = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _UpperCAmelCase ) )
| 52 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out , labels ):
    outputs = np.argmax(out , axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path ):
    with open(dataset_path , encoding='''utf_8''') as f:
        f = csv.reader(f)
        output = []
        next(f) # skip the first line
        for line in tqdm(f):
            output.append((''' '''.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
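# Row sketch (hypothetical example): each parsed line yields a 4-tuple
#   (story_sentences_1_to_4, continuation_1, continuation_2, correct_index)
# e.g. ('She went to the store. ...', 'She bought milk.', 'She flew away.', 0)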
def pre_process_datasets(encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    # Build one (input_ids, mc_token_ids, lm_labels, mc_labels) tuple per dataset.
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64)
        mc_labels = np.zeros((n_batch,) , dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
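# Shape sketch (assumed dims): for a dataset of n stories and input_len L this
# yields input_ids (n, 2, L), mc_token_ids (n, 2), lm_labels (n, 2, L) padded
# with -100 (ignored by the LM loss), and mc_labels (n,).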
def main():
__a : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=a_ , default='''openai-gpt''' , help='''pretrained model name''')
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''')
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''')
parser.add_argument(
'''--output_dir''' , default=a_ , type=a_ , required=a_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=a_ , default='''''')
parser.add_argument('''--eval_dataset''' , type=a_ , default='''''')
parser.add_argument('''--seed''' , type=a_ , default=42)
parser.add_argument('''--num_train_epochs''' , type=a_ , default=3)
parser.add_argument('''--train_batch_size''' , type=a_ , default=8)
parser.add_argument('''--eval_batch_size''' , type=a_ , default=16)
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=a_ , help='''Epsilon for Adam optimizer.''')
parser.add_argument('''--max_grad_norm''' , type=a_ , default=1)
parser.add_argument(
'''--max_steps''' , default=-1 , type=a_ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=a_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=a_ , default=6.25e-5)
parser.add_argument('''--warmup_steps''' , default=0 , type=a_ , help='''Linear warmup over warmup_steps.''')
parser.add_argument('''--lr_schedule''' , type=a_ , default='''warmup_linear''')
parser.add_argument('''--weight_decay''' , type=a_ , default=0.0_1)
parser.add_argument('''--lm_coef''' , type=a_ , default=0.9)
parser.add_argument('''--n_valid''' , type=a_ , default=3_74)
parser.add_argument('''--server_ip''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
parser.add_argument('''--server_port''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=a_)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
    n_gpu = torch.cuda.device_count()
    logger.info('''device: {}, n_gpu {}'''.format(device , n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__a : List[str] = ['''_start_''', '''_delimiter_''', '''_classify_''']
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name)
tokenizer.add_tokens(a_)
__a : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_)
__a : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
model.resize_token_embeddings(len(a_))
model.to(a_)
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ))
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info('''Encoding dataset...''')
__a : Dict = load_rocstories_dataset(args.train_dataset)
__a : int = load_rocstories_dataset(args.eval_dataset)
__a : Optional[int] = (train_dataset, eval_dataset)
__a : List[Any] = tokenize_and_encode(a_)
# Compute the max input length for the Transformer
__a : List[Any] = model.config.n_positions // 2 - 2
__a : int = max(
len(story[:max_length]) + max(len(conta[:max_length]) , len(conta[:max_length])) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset)
__a : Union[str, Any] = min(a_ , model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__a : Tuple = pre_process_datasets(a_ , a_ , a_ , *a_)
__a , __a : Tuple = tensor_datasets[0], tensor_datasets[1]
__a : List[str] = TensorDataset(*a_)
__a : Optional[Any] = RandomSampler(a_)
__a : str = DataLoader(a_ , sampler=a_ , batch_size=args.train_batch_size)
__a : List[str] = TensorDataset(*a_)
__a : Optional[int] = SequentialSampler(a_)
__a : Optional[Any] = DataLoader(a_ , sampler=a_ , batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__a : int = args.max_steps
__a : Optional[int] = args.max_steps // (len(a_) // args.gradient_accumulation_steps) + 1
else:
__a : str = len(a_) // args.gradient_accumulation_steps * args.num_train_epochs
__a : List[Any] = list(model.named_parameters())
__a : Optional[int] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__a : List[str] = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], '''weight_decay''': 0.0},
]
__a : int = AdamW(a_ , lr=args.learning_rate , eps=args.adam_epsilon)
__a : Union[str, Any] = get_linear_schedule_with_warmup(
a_ , num_warmup_steps=args.warmup_steps , num_training_steps=a_)
if args.do_train:
        nb_tr_steps , tr_loss , exp_average_loss = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs) , desc='''Epoch'''):
__a : Dict = 0
__a : Dict = 0
__a : List[str] = tqdm(a_ , desc='''Training''')
for step, batch in enumerate(a_):
__a : Dict = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : str = batch
__a : List[Any] = model(a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : Optional[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__a : int = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__a : Tuple = '''Training loss: {:.2e} lr: {:.2e}'''.format(a_ , scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__a : Dict = model.module if hasattr(a_ , '''module''') else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__a : int = os.path.join(args.output_dir , a_)
__a : str = os.path.join(args.output_dir , a_)
torch.save(model_to_save.state_dict() , a_)
model_to_save.config.to_json_file(a_)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
__a : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
model.to(a_)
if args.do_eval:
model.eval()
        eval_loss , eval_accuracy = 0, 0
        nb_eval_steps , nb_eval_examples = 0, 0
for batch in tqdm(a_ , desc='''Evaluating'''):
__a : str = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : List[Any] = batch
with torch.no_grad():
__a , __a , __a , __a : str = model(
a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('''cpu''').numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
__a : Tuple = eval_loss / nb_eval_steps
__a : List[str] = eval_accuracy / nb_eval_examples
__a : List[Any] = tr_loss / nb_tr_steps if args.do_train else None
__a : List[str] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__a : Dict = os.path.join(args.output_dir , '''eval_results.txt''')
with open(a_ , '''w''') as writer:
logger.info('''***** Eval results *****''')
for key in sorted(result.keys()):
logger.info(''' %s = %s''' , a_ , str(result[key]))
writer.write('''%s = %s\n''' % (key, str(result[key])))
if __name__ == "__main__":
main()
| 52 | 1 |
"""simple docstring"""
def merge_sort(collection):
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
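# Illustrative check (hypothetical helper): each pass extracts the current min
# and max with two linear scans, so the whole sort is O(n^2).
def _demo_merge_sort() -> None:
    assert merge_sort([5, 2, 9, 1, 7]) == [1, 2, 5, 7, 9]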
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 52 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=4 , ):
__a : Any = parent
__a : Optional[int] = batch_size
__a : str = seq_length
__a : List[str] = is_training
__a : Optional[Any] = use_attention_mask
__a : Optional[Any] = use_token_type_ids
__a : List[str] = use_labels
__a : Union[str, Any] = vocab_size
__a : int = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : Dict = intermediate_size
__a : List[str] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Union[str, Any] = attention_probs_dropout_prob
__a : int = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Optional[int] = type_sequence_label_size
__a : Optional[Any] = initializer_range
__a : Optional[int] = num_choices
def _lowerCamelCase ( self ):
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Union[str, Any] = None
if self.use_attention_mask:
__a : Any = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[int] = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Any = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self ):
__a : Dict = self.prepare_config_and_inputs()
__a , __a , __a , __a : str = config_and_inputs
__a : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self ):
__a : Any = self.prepare_config_and_inputs()
__a , __a , __a , __a : Union[str, Any] = config_and_inputs
__a : Optional[int] = True
__a : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = True
__lowerCAmelCase = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowerCamelCase ( self ):
__a : Dict = FlaxRobertaModelTester(self )
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
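            # Smoke-test sketch: np.ones((1, 1)) below is a single dummy token id;
            # the call only verifies that PyTorch -> Flax weight conversion yields
            # a callable module, not output correctness.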
            model = model_class_name.from_pretrained('''roberta-base''' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 52 | 1 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = IFInpaintingPipeline
__lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
__lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self ):
return self._get_dummy_components()
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('''mps''' ):
__a : str = torch.manual_seed(_UpperCAmelCase )
else:
__a : List[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
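        # Note (sketch): CUDA/CPU get a device-bound torch.Generator above, while
        # the mps branch falls back to torch.manual_seed, since mps does not
        # support device generators.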
__a : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
__a : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
__a : Optional[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _lowerCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def _lowerCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _lowerCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _lowerCamelCase ( self ):
self._test_save_load_local()
def _lowerCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 52 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''levit'''
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=3 , _UpperCAmelCase=3 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=16 , _UpperCAmelCase=[128, 256, 384] , _UpperCAmelCase=[4, 8, 12] , _UpperCAmelCase=[4, 4, 4] , _UpperCAmelCase=[16, 16, 16] , _UpperCAmelCase=0 , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=0.0_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = image_size
__a : List[Any] = num_channels
__a : Dict = kernel_size
__a : Optional[int] = stride
__a : Optional[int] = padding
__a : Dict = hidden_sizes
__a : int = num_attention_heads
__a : Optional[int] = depths
__a : str = key_dim
__a : Union[str, Any] = drop_path_rate
__a : Optional[Any] = patch_size
__a : Tuple = attention_ratio
__a : int = mlp_ratio
__a : int = initializer_range
__a : int = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
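# Stem sketch (assumption from the reference LeViT models, not stated in this
# config): the patch embedding stacks four stride-2, kernel-3, padding-1 convs,
# downsampling image_size 224 -> 14, i.e. by patch_size = 16.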
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = version.parse('''1.11''' )
@property
def _lowerCamelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowerCamelCase ( self ):
return 1e-4
| 52 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet'''] = ['''XLNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlnet_fast'''] = ['''XLNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlnet'''] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlnet'''] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
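# Lazy-import sketch: with _LazyModule, importing this package stays cheap; an
# attribute access such as XLNetModel triggers the real submodule import on
# first use, gated by the availability checks above.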
| 52 |
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
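# Sketch of the two denominators (hypothetical helper): the default is the true
# Jaccard index |A & B| / |A | B|; alternative_union uses |A| + |B|, which
# double-counts the overlap and yields a smaller score.
def _demo_jaccard() -> None:
    print(jaccard_similarity({1, 2, 3}, {2, 3, 4}))                          # 2 / 4 = 0.5
    print(jaccard_similarity({1, 2, 3}, {2, 3, 4}, alternative_union=True))  # 2 / 6 ~= 0.33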
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
| 52 | 1 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, old_attribute):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")
    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
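    # Example invocation (hypothetical paths, shown for illustration only):
    #   python convert_prophetnet_checkpoint_to_pytorch.py \
    #       --prophetnet_checkpoint_path ./prophetnet_old \
    #       --pytorch_dump_folder_path ./prophetnet_hf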
| 52 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A = tuple[int, int]
class Graph:
    """Weighted undirected graph with a Prim's-algorithm minimum spanning tree."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
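# Illustrative check (assumed example, not part of the original solution): for a
# triangle with edge weights 1, 2 and 3, Prim's algorithm keeps the two cheapest
# edges, i.e. Graph({1, 2, 3}, {(1, 2): 1, (2, 3): 2, (1, 3): 3}).prims_algorithm()
# yields a subgraph whose edges are {(1, 2): 1, (2, 3): 2}.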
def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edge_a in range(1, len(adjacency_matrix)):
        for edge_b in range(edge_a):
            if adjacency_matrix[edge_a][edge_b] != "-":
                edges[(edge_b, edge_a)] = int(adjacency_matrix[edge_a][edge_b])
    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
| 52 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
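# Illustrative behaviour of the wildcard rules above (assumed key names, not
# taken from a real ignore list):
#   should_ignore("decoder.model.0.conv.bias", ["decoder.model.*"])    -> True
#   should_ignore("encoder.model.13.lstm.weight", ["encoder.*.conv"])  -> False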
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
logger.warning(F"""Unused weights: {unused_weights}""")
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2_048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
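    # Example invocation (hypothetical paths; the 24 kHz checkpoint name matches
    # the download links in the header comment above):
    #   python convert_encodec_checkpoint_to_pytorch.py --model encodec_24khz \
    #       --checkpoint_path ./encodec_24khz-d7cc33bc.th --pytorch_dump_folder_path ./encodec-24khz-hf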
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
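# Minimal usage sketch (illustrative, not part of the original file):
#   config = TrOCRConfig(decoder_layers=6)   # smaller decoder
#   assert config.num_hidden_layers == 6     # resolved via attribute_map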
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
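# Worked example (illustrative): for [[2, 1], [4, 3]] the factorisation gives
# L = [[1, 0], [2, 1]] and U = [[2, 1], [0, 1]], since 4 = 2 * 2 and
# 3 = 2 * 1 + 1 * 1.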
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
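# Illustrative post-processing (not in the original script): each returned
# triple is (filled_sentence, probability, predicted_token), so the results
# can be pretty-printed like this:
#   for sentence, prob, token in fill_mask(masked_input, model, tokenizer, topk=3):
#       print(f"{prob:.3f}  {token!r}  ->  {sentence}")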
| 52 | 1 |
"""simple docstring"""
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
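# Sanity check (illustrative): C(10, 5) = 252, so the print above outputs 252,
# matching math.comb(10, 5) from the standard library.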
| 52 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 52 | 1 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
MODEL_TYPE = '''bart'''
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0")
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wikiaab_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wikiaab_passages.num_rows, 128), )
        wikiaab_index_flat = faiss.IndexFlatIP(128)
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    elia = datasets.load_dataset("eli5", name="LFQA_reddit")
    elia_train = elia["train_eli5"]
    elia_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(elia_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(elia_train_q_reps)
    return (elia_train, eli5_train_q_index)
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [elia_train[int(i)] for i in I[0]]
    return nn_examples
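# Illustrative call (assumes the globals loaded above; not in the original app):
#   neighbors = find_nearest_training("How do planes fly?", n_results=10)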
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wikiaab_passages, wikiaab_gpu_index_flat, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results, )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    })
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len,
            max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox('''Demo options''')
if demo_options:
    action_st = st.sidebar.selectbox(
        '''''',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '''''',
        ['''Show full text of passages''', '''Show passage section titles'''],
        index=0,
    )
    show_passages = show_type == '''Show full text of passages'''
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
    retriever_info = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
    index_type = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
    wiki_source = '''wiki40b'''
    index_type = '''dense'''
sampled = '''beam'''
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('''Generation options''')
if generate_options:
    generate_info = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
    min_len = st.sidebar.slider(
        '''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        '''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('''Enter your question here:''', '''''')
else:
    question = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method='''dense''', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
            wiki_url = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = '''[{}]({})'''.format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(''' & ''')
                sections = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
        answers_st = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
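# Illustrative usage (not part of the original file): a well-formed
# rope_scaling dict passes validation, while a factor <= 1.0 raises.
#   LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})   # ok
#   LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})   # ValueError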
| 52 | 1 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight")
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias")
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str, ):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml")
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)
    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output VAE model.''')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 52 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 52 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(self, vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 52 |
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
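# Worked example (illustrative): for [1, 2, 3, 4] the best non-adjacent picks
# are 2 + 4 = 6, and for [1, 5, 3, 7, 2, 2, 6] they are 5 + 7 + 6 = 18.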
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 | 1 |
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """Count subtree sizes; a subtree of even size marks a removable edge."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
A = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
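    # For the sample tree above, the subtrees rooted at vertices 3 (size 2) and
    # 6 (size 4) have even size, so two edges can be removed and 2 is printed.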
| 52 |
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a : Optional[Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def _lowerCamelCase ( self ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def _lowerCamelCase ( self ):
__a : str = '''Hello World!'''
__a : str = [65, 18536, 2260, 101, 66]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def _lowerCamelCase ( self ):
__a : Any = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
__a : Optional[Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@require_torch
@slow
def _lowerCamelCase ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
__a : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
__a : List[str] = ''' '''.join(_UpperCAmelCase )
__a : Tuple = self.big_tokenizer.encode_plus(_UpperCAmelCase , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Any = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=_UpperCAmelCase )
__a : Optional[Any] = BigBirdConfig(attention_type='''original_full''' )
__a : Tuple = BigBirdModel(_UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCAmelCase )
model(**_UpperCAmelCase )
@slow
def _lowerCamelCase ( self ):
__a : Union[str, Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
__a : List[Any] = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def _lowerCamelCase ( self ):
# fmt: off
__a : Optional[Any] = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 52 | 1 |
"""simple docstring"""
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build a one-lane highway where -1 marks an empty cell.

    >>> construct_highway(10, 2, 6)
    [[6, -1, 6, -1, 6, -1, 6, -1, 6, -1]]
    """
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Return the number of empty cells between ``car_index`` and the next car."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """One Nagel-Schreckenberg step: accelerate, brake, then brake randomly."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(
    highway: list, number_of_update: int, probability: float, max_speed: int
) -> list:
    """Run ``number_of_update`` steps, moving every car by its computed speed."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
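
    # Illustrative run (parameters are arbitrary, chosen only for this demo):
    # one lane of 30 cells, a car every 4 cells at initial speed 1, 5 updates,
    # 10% random-braking probability, maximum speed 5.
    demo = simulate(construct_highway(30, 4, 1), 5, 0.1, 5)
    print(len(demo))  # -> 6: the initial lane state plus one state per update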
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A = logging.get_logger(__name__)
A = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''convnextv2'''
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=224 , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : List[str] = num_channels
__a : str = patch_size
__a : Dict = num_stages
__a : List[str] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
__a : List[str] = [3, 3, 9, 3] if depths is None else depths
__a : List[Any] = hidden_act
__a : Any = initializer_range
__a : Optional[int] = layer_norm_eps
__a : List[Any] = drop_path_rate
__a : Any = image_size
__a : str = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__a , __a : Optional[int] = get_aligned_output_features_output_indices(
out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )
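

# A minimal usage sketch (assumption: this module's class is exposed upstream as
# ConvNextV2Config, as in the transformers package; the obfuscated name above
# stands in for it):
# config = ConvNextV2Config(depths=[3, 3, 9, 3], hidden_sizes=[96, 192, 384, 768])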
| 52 | 1 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
A = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg] , hint)
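

# Hypothetical usage (nothing in this module calls the helper directly):
# dep_version_check('''numpy''') raises if the installed numpy violates the pin
# recorded in dependency_versions_table.py.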
| 52 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = (DDPMScheduler,)
def _lowerCamelCase ( self , **_UpperCAmelCase ):
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_UpperCAmelCase )
return config
def _lowerCamelCase ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def _lowerCamelCase ( self ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def _lowerCamelCase ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def _lowerCamelCase ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Dict = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def _lowerCamelCase ( self ):
__a : int = self.scheduler_classes[0]
__a : int = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**_UpperCAmelCase )
__a : int = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[Any] = self.dummy_sample_deter
__a : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Optional[int] = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : List[Any] = pred_prev_sample
__a : int = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = self.scheduler_classes[0]
__a : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a : int = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[str] = self.dummy_sample_deter
__a : str = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Dict = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : Optional[int] = pred_prev_sample
__a : Optional[int] = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Any = self.get_scheduler_config()
__a : str = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
__a : List[Any] = scheduler.timesteps
for i, timestep in enumerate(_UpperCAmelCase ):
if i == len(_UpperCAmelCase ) - 1:
__a : Union[str, Any] = -1
else:
__a : str = timesteps[i + 1]
__a : Dict = scheduler.previous_timestep(_UpperCAmelCase )
__a : str = prev_t.item()
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Tuple = self.scheduler_classes[0]
__a : Dict = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Optional[Any] = [100, 87, 50, 51, 0]
with self.assertRaises(_UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[Any] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = [100, 87, 50, 1, 0]
__a : Optional[int] = len(_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.scheduler_classes[0]
__a : Optional[Any] = self.get_scheduler_config()
__a : List[str] = scheduler_class(**_UpperCAmelCase )
__a : List[Any] = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            _UpperCAmelCase , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
| 52 | 1 |
"""simple docstring"""
import requests
A = '''''' # <-- Put your OpenWeatherMap appid here!
A = '''https://api.openweathermap.org/data/2.5/'''
def __A ( a_ :str = "Chicago" , a_ :str = APPID) -> dict:
return requests.get(URL_BASE + '''weather''' , params=locals()).json()
def __A ( a_ :str = "Kolkata, India" , a_ :str = APPID) -> dict:
return requests.get(URL_BASE + '''forecast''' , params=locals()).json()
def __A ( a_ :float = 5_5.6_8 , a_ :float = 1_2.5_7 , a_ :str = APPID) -> dict:
return requests.get(URL_BASE + '''onecall''' , params=locals()).json()
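

# Note: every endpoint requires a valid `appid`; with the empty default above the
# API answers with an error payload such as {"cod": 401, "message": ...}.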
if __name__ == "__main__":
from pprint import pprint
while True:
A = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
| 52 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
A = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float nested list with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=400 , _UpperCAmelCase=2000 , _UpperCAmelCase=2048 , _UpperCAmelCase=128 , _UpperCAmelCase=1 , _UpperCAmelCase=512 , _UpperCAmelCase=30 , _UpperCAmelCase=44100 , ):
__a : Any = parent
__a : Tuple = batch_size
__a : Tuple = min_seq_length
__a : List[str] = max_seq_length
__a : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a : Tuple = spectrogram_length
__a : int = feature_size
__a : int = num_audio_channels
__a : Tuple = hop_length
__a : List[Any] = chunk_length
__a : Any = sampling_rate
def _lowerCamelCase ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _lowerCamelCase ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ):
def _flatten(_UpperCAmelCase ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
__a : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a : Optional[Any] = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = TvltFeatureExtractor
def _lowerCamelCase ( self ):
__a : Optional[Any] = TvltFeatureExtractionTester(self )
def _lowerCamelCase ( self ):
__a : int = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''hop_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : List[str] = feat_extract_first.save_pretrained(_UpperCAmelCase )[0]
check_json_file_has_correct_format(_UpperCAmelCase )
__a : Union[str, Any] = self.feature_extraction_class.from_pretrained(_UpperCAmelCase )
__a : Tuple = feat_extract_first.to_dict()
__a : List[Any] = feat_extract_second.to_dict()
__a : int = dict_first.pop('''mel_filters''' )
__a : List[Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a : int = os.path.join(_UpperCAmelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(_UpperCAmelCase )
__a : Optional[Any] = self.feature_extraction_class.from_json_file(_UpperCAmelCase )
__a : Optional[Any] = feat_extract_first.to_dict()
__a : Any = feat_extract_second.to_dict()
__a : Optional[Any] = dict_first.pop('''mel_filters''' )
__a : Dict = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
# Initialize feature_extractor
__a : str = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
__a : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__a : int = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__a : List[Any] = feature_extractor(
_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 , mask_audio=_UpperCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__a : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a : Any = np.asarray(_UpperCAmelCase )
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : int = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
__a : int = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self ):
__a : List[str] = self._load_datasamples(1 )
__a : Tuple = TvltFeatureExtractor()
__a : Optional[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__a : Dict = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _UpperCAmelCase , atol=1e-4 ) )
| 52 | 1 |
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
A = logging.getLogger()
A = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
def _lowerCamelCase ( self , _UpperCAmelCase ):
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
__a : Union[str, Any] = {'''source''': '''What is love ?''', '''target''': '''life'''}
__a : Tuple = {'''train''': 12, '''val''': 2, '''test''': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
__a : Tuple = '''\n'''.join([contents[field]] * n_lines[split] )
with open(os.path.join(_UpperCAmelCase , f"""{split}.{field}""" ) , '''w''' ) as f:
f.write(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = "pytorch" ):
__a : Optional[int] = self.get_auto_remove_tmp_dir()
__a : Optional[int] = os.path.join(_UpperCAmelCase , '''output''' )
__a : Optional[int] = os.path.join(_UpperCAmelCase , '''data''' )
self._create_dummy_data(data_dir=_UpperCAmelCase )
__a : int = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('''--fp16''' )
else:
testargs.append('''--gpus=0''' )
testargs.append('''--distributed_backend=ddp_cpu''' )
testargs.append('''--num_processes=2''' )
__a : Dict = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(_UpperCAmelCase , env=self.get_env() )
__a : Optional[Any] = os.path.join(_UpperCAmelCase , '''metrics.json''' )
with open(_UpperCAmelCase ) as f:
__a : Dict = json.load(_UpperCAmelCase )
return result
@require_torch_gpu
def _lowerCamelCase ( self ):
__a : Optional[int] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
def _lowerCamelCase ( self ):
__a : Optional[int] = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_gpu
@require_ray
def _lowerCamelCase ( self ):
__a : Dict = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _lowerCamelCase ( self ):
__a : Dict = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
| 52 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    '''simple docstring'''

    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # Rightmost index of ``char`` in the pattern, or -1 if absent.
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # Rightmost mismatching text index for this alignment, or -1 on a match.
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = '''ABAABA'''
pattern = '''AB'''
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print('''No match found''')
else:
    print('''Pattern found in following positions: ''')
    print(positions)
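
# For text '''ABAABA''' and pattern '''AB''' the positions printed above are [0, 3].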
| 52 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
A = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
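
# Note: this script scrapes Google's result HTML; the div class names used below
# ('yuRUbf' and 'kCrYT') are snapshots of Google's markup and may need updating.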
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
    print('''Googling.....''')
    url = F'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
        url,
        headers={'''User-Agent''': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''yuRUbf'''})
            .find('''a''')
            .get('''href''')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''kCrYT'''})
            .find('''a''')
            .get('''href''')
        )['''url'''][0]
    webbrowser.open(link)
| 52 | 1 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
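
    # Worked example (illustrative): 1 mol at 273.15 K in 0.0224 m^3 comes out
    # near standard atmospheric pressure (~101325 Pa).
    print(pressure_of_gas_system(1.0, 273.15, 0.0224))  # ~101388 Pa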
| 52 |
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = False
__lowerCAmelCase = 3.0
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_UpperCAmelCase ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.2_5 ).to_kwargs() , {'''a''': 2, '''c''': 2.2_5} )
@require_cuda
def _lowerCamelCase ( self ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
__a : List[Any] = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
__a : int = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__a : Optional[Any] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _UpperCAmelCase )
@require_multi_gpu
def _lowerCamelCase ( self ):
__a : Dict = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
A = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
A = Accelerator(kwargs_handlers=[ddp_scaler])
A = torch.nn.Linear(100, 200)
A = accelerator.prepare(model)
# Check the values changed in kwargs
A = ''''''
A = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 52 | 1 |
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence containing only the values in ``colors`` in one pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = F"""The elements inside the sequence must contain only {colors} values"""
            raise ValueError(msg)
    return sequence
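

# Worked example (illustrative): dutch_national_flag_sort([2, 0, 1, 0, 2, 1])
# partitions the list in a single pass into [0, 0, 1, 1, 2, 2].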
if __name__ == "__main__":
import doctest
doctest.testmod()
A = input('''Enter numbers separated by commas:\n''').strip()
A = [int(item.strip()) for item in user_input.split(''',''')]
print(F'{dutch_national_flag_sort(unsorted)}')
| 52 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check whether ``number`` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Reduce x_num/x_den + y_num/y_den + z_num/z_den to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
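

# Reduction example: add_three(1, 2, 1, 3, 1, 6) returns (1, 1),
# since 1/2 + 1/3 + 1/6 == 1/1 after dividing by gcd(36, 36).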
def solution(order: int = 35) -> int:
    """Sum all unique reduced fractions produced by the four cases n = 1, 2, -1, -2
    and return the numerator plus the denominator of the total."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F'{solution() = }')
| 52 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / '''model_card_template.md'''
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/'''
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''' , '''''').upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent , dict):
        ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items())
    elif isinstance(user_agent , str):
ua += "; " + user_agent
return ua
def get_full_repo_name(model_id: str , organization: Optional[str] = None , token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['''name''']
        return F"""{username}/{model_id}"""
    else:
        return F"""{organization}/{model_id}"""
def create_model_card(args , model_name):
    if not is_jinja_available():
        raise ValueError(
            '''Modelcard rendering is based on Jinja templates.'''
            ''' Please make sure to have `jinja` installed before using `create_model_card`.'''
            ''' To install it, please run `pip install Jinja2`.''')
    if hasattr(args , '''local_rank''') and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , '''hub_token''') else None
    repo_name = get_full_repo_name(model_name , token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=MODEL_CARD_TEMPLATE_PATH , model_name=model_name , repo_name=repo_name , dataset_name=args.dataset_name if hasattr(args , '''dataset_name''') else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , '''gradient_accumulation_steps''') else None
        ) , adam_beta1=args.adam_beta1 if hasattr(args , '''adam_beta1''') else None , adam_beta2=args.adam_beta2 if hasattr(args , '''adam_beta2''') else None , adam_weight_decay=args.adam_weight_decay if hasattr(args , '''adam_weight_decay''') else None , adam_epsilon=args.adam_epsilon if hasattr(args , '''adam_epsilon''') else None , lr_scheduler=args.lr_scheduler if hasattr(args , '''lr_scheduler''') else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(args , '''lr_warmup_steps''') else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(args , '''ema_inv_gamma''') else None , ema_power=args.ema_power if hasattr(args , '''ema_power''') else None , ema_max_decay=args.ema_max_decay if hasattr(args , '''ema_max_decay''') else None , mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir , '''README.md''')
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str] , commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename pointing into the cache."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(R'''snapshots/([^/]+)/''' , resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface'''))
)
old_diffusers_cache = os.path.join(hf_cache_home, '''diffusers''')
def move_cache(old_cache_dir: Optional[str] = None , new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*'''):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True , exist_ok=True)
            os.replace(old_blob_path , new_blob_path)
            try:
                os.symlink(new_blob_path , old_blob_path)
            except OSError:
                logger.warning(
                    '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
with open(cache_version_file) as f:
try:
            cache_version = int(f.read())
except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '''
'''existing cached models. This is a one-time operation, you can interrupt it or run it '''
'''later by calling `diffusers.utils.hub_utils.move_cache()`.'''
)
try:
move_cache()
except Exception as e:
            trace = '''\n'''.join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '''
'''message and we will do our best to help.'''
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, '''w''') as f:
f.write('''1''')
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'''the directory exists and can be written to.'''
)
def _add_variant(weights_name: str , variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split('''.''')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits)
    return weights_name
def _get_model_file(
    pretrained_model_name_or_path , * ,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name)
            return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""")
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('''0.20.0''')
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
warnings.warn(
F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , a_ , )
return model_file
except: # noqa: E722
warnings.warn(
F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(a_ , a_)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(a_ , a_)}' so that the correct variant file can be added.""" , a_ , )
try:
# 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''')
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'''this model name. Check the model page at '''
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""")
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""")
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""")
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''')
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""")
| 52 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_text_model'''
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : int = vocab_size
__a : Optional[int] = hidden_size
__a : Dict = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : Optional[int] = hidden_act
__a : List[Any] = intermediate_size
__a : List[Any] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : Optional[int] = max_position_embeddings
__a : List[str] = type_vocab_size
__a : Tuple = initializer_range
__a : Dict = layer_norm_eps
__a : Any = position_embedding_type
__a : Dict = use_cache
__a : Dict = pad_token_id
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__a , __a : List[str] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
__a : Dict = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''align_vision_model'''
def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 600 , _UpperCAmelCase = 2.0 , _UpperCAmelCase = 3.1 , _UpperCAmelCase = 8 , _UpperCAmelCase = [3, 3, 5, 3, 5, 5, 3] , _UpperCAmelCase = [32, 16, 24, 40, 80, 112, 192] , _UpperCAmelCase = [16, 24, 40, 80, 112, 192, 320] , _UpperCAmelCase = [] , _UpperCAmelCase = [1, 2, 2, 2, 1, 2, 1] , _UpperCAmelCase = [1, 2, 2, 3, 3, 4, 1] , _UpperCAmelCase = [1, 6, 6, 6, 6, 6, 6] , _UpperCAmelCase = 0.2_5 , _UpperCAmelCase = "swish" , _UpperCAmelCase = 2560 , _UpperCAmelCase = "mean" , _UpperCAmelCase = 0.0_2 , _UpperCAmelCase = 0.0_0_1 , _UpperCAmelCase = 0.9_9 , _UpperCAmelCase = 0.2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__a : Tuple = num_channels
__a : str = image_size
__a : List[Any] = width_coefficient
__a : Optional[int] = depth_coefficient
__a : Union[str, Any] = depth_divisor
__a : int = kernel_sizes
__a : Dict = in_channels
__a : List[str] = out_channels
__a : Any = depthwise_padding
__a : str = strides
__a : Optional[Any] = num_block_repeats
__a : Optional[Any] = expand_ratios
__a : Any = squeeze_expansion_ratio
__a : int = hidden_act
__a : Union[str, Any] = hidden_dim
__a : Union[str, Any] = pooling_type
__a : Tuple = initializer_range
__a : List[str] = batch_norm_eps
__a : List[Any] = batch_norm_momentum
__a : Union[str, Any] = drop_connect_rate
__a : List[Any] = sum(_UpperCAmelCase ) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    r"""Composite configuration holding an AlignTextConfig and an AlignVisionConfig."""

    model_type = "align"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
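# A minimal sketch of how these configuration classes compose (uses the public
# transformers API for ALIGN; attribute names match the definitions above):
#
#     from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig
#
#     text_config = AlignTextConfig(vocab_size=30522, hidden_size=768)
#     vision_config = AlignVisionConfig(image_size=600)
#     config = AlignConfig.from_text_vision_configs(text_config, vision_config)
#     assert config.to_dict()["text_config"]["hidden_size"] == 768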
| 52 | 1 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
def _lowerCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def _lowerCamelCase ( self ):
        tokenizer = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=_UpperCAmelCase ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=_UpperCAmelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _lowerCamelCase ( self ):
__a : List[str] = self.tokenizer_class.from_pretrained('''roberta-base''' )
__a : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=_UpperCAmelCase )
__a : Any = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_UpperCAmelCase )
__a : List[Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__a : str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__a : str = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__a : Tuple = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.get_tokenizer()
__a : str = '''Encode this sequence.'''
__a : List[Any] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__a : Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__a : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Union[str, Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
__a : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__a : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing spaces after special tokens
__a : Dict = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )} ) # mask token has a left space
__a : Optional[Any] = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
__a : List[Any] = '''Encode <mask> sequence'''
__a : Optional[Any] = '''Encode <mask>sequence'''
__a : Tuple = tokenizer.encode(_UpperCAmelCase )
__a : List[Any] = encoded.index(_UpperCAmelCase )
__a : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
__a : Dict = tokenizer.encode(_UpperCAmelCase )
__a : Tuple = encoded.index(_UpperCAmelCase )
__a : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__a : Tuple = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
__a : List[str] = '''A, <mask> AllenNLP sentence.'''
__a : Dict = tokenizer_r.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
__a : Tuple = tokenizer_p.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
            # attention_mask should put 1 everywhere, so the mean over the sequence length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__a : int = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__a : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
            # The Rust tokenizer correctly handles the space before the mask token, while the Python one doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
_UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _lowerCamelCase ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__a : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__a : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , _UpperCAmelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , _UpperCAmelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] , _UpperCAmelCase )
def _lowerCamelCase ( self ):
        # Verify that the returned offsets correctly reflect the `add_prefix_space`
        # and `trim_offsets` arguments
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__a : List[Any] = f"""{text_of_1_token} {text_of_1_token}"""
__a : Tuple = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Dict = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__a : List[str] = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : List[Any] = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ) + 1, len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Dict = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__a : Dict = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Optional[int] = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCAmelCase ), len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__a : Optional[int] = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__a : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Tuple = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ) + 1, 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__a : int = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Optional[Any] = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
__a : Dict = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , use_fast=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase )
__a : Tuple = tokenizer_r(_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCAmelCase ), 1 + len(_UpperCAmelCase ) + 1 + len(_UpperCAmelCase )) , )
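# A short illustration of the offset-mapping behaviour the tests above check
# (requires downloading "roberta-base"; the ids asserted in the slow test are
# the authoritative reference):
#
#     from transformers import RobertaTokenizerFast
#
#     tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#     enc = tok("hello world", return_offsets_mapping=True, add_special_tokens=False)
#     # each (start, end) pair maps a produced token back to a span of the input string
#     print(list(zip(tok.convert_ids_to_tokens(enc["input_ids"]), enc["offset_mapping"])))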
| 52 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst: list[int]) -> int:
    """
    Choose a random pivot element from the list.
    """
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the k-th smallest element of a list of distinct numbers
    (quickselect; k is 1-indexed).

    >>> kth_number([2, 1, 3, 4, 5], 3)
    3
    >>> kth_number([7], 1)
    7
    """
    pivot = random_pivot(lst)

    # partition based on the pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, the pivot is exactly the kth smallest element:
    #   small (the k - 1 elements smaller than the pivot)
    #   + pivot (the kth element)
    #   + big (elements larger than the pivot)
    if len(small) == k - 1:
        return pivot
    # the kth element is among the elements bigger than the pivot
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # the kth element is among the elements smaller than the pivot
    else:
        return kth_number(small, k)
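# Expected running time is O(n): each recursive call discards, on average, a
# constant fraction of the remaining elements. The worst case degrades to
# O(n^2) when the random pivot repeatedly lands on an extreme value. Note that
# duplicates of the pivot are dropped by the partition, so the input should
# contain distinct values.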
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , '''num_encoder_blocks''' ) )
class SegformerModelTester:
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=64 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=[2, 2, 2, 2] , _UpperCAmelCase=[8, 4, 2, 1] , _UpperCAmelCase=[16, 32, 64, 128] , _UpperCAmelCase=[1, 4, 8, 16] , _UpperCAmelCase=[1, 2, 4, 8] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=None , ):
__a : Any = parent
__a : Optional[int] = batch_size
__a : Optional[int] = image_size
__a : List[str] = num_channels
__a : List[str] = num_encoder_blocks
__a : int = sr_ratios
__a : str = depths
__a : Any = hidden_sizes
__a : Optional[int] = downsampling_rates
__a : List[Any] = num_attention_heads
__a : Optional[Any] = is_training
__a : int = use_labels
__a : List[Any] = hidden_act
__a : Any = hidden_dropout_prob
__a : Optional[Any] = attention_probs_dropout_prob
__a : Optional[int] = initializer_range
__a : Any = num_labels
__a : str = scope
def _lowerCamelCase ( self ):
__a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : int = None
if self.use_labels:
__a : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a : List[str] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = SegformerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : Any = model(_UpperCAmelCase )
__a : Tuple = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = self.num_labels
__a : int = SegformerForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : int = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
__a : Optional[int] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[str] = 1
__a : Optional[Any] = SegformerForSemanticSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__a : int = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_UpperCAmelCase )
__a : str = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertGreater(result.loss , 0.0 )
def _lowerCamelCase ( self ):
__a : str = self.prepare_config_and_inputs()
__a , __a , __a : str = config_and_inputs
__a : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_UpperCAmelCase )
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_UpperCAmelCase )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def _lowerCamelCase ( self ):
pass
    @unittest.skip("SegFormer does not have get_input_embeddings or get_output_embeddings methods")
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(_UpperCAmelCase )
__a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Tuple = [*signature.parameters.keys()]
__a : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _lowerCamelCase ( self ):
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Tuple = True
for model_class in self.all_model_classes:
__a : List[str] = True
__a : Any = False
__a : str = True
__a : List[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__a : int = outputs.attentions
__a : int = sum(self.model_tester.depths )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a : Tuple = True
__a : int = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__a : Dict = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first attentions (first block, first layer)
__a : int = (self.model_tester.image_size // 4) ** 2
__a : int = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__a : int = (self.model_tester.image_size // 32) ** 2
__a : Dict = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__a : int = len(_UpperCAmelCase )
# Check attention is always last and order is fine
__a : Union[str, Any] = True
__a : Tuple = True
__a : List[str] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : Tuple = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCAmelCase ) )
__a : Tuple = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first attentions (first block, first layer)
__a : Optional[Any] = (self.model_tester.image_size // 4) ** 2
__a : List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _lowerCamelCase ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__a : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__a : Optional[Any] = outputs.hidden_states
__a : Optional[Any] = self.model_tester.num_encoder_blocks
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : int = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self ):
if not self.model_tester.is_training:
return
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ):
continue
__a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
__a : Optional[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
__a : List[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowerCamelCase ( self ):
pass
@slow
def _lowerCamelCase ( self ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Dict = SegformerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __A ( ) -> Optional[int]:
__a : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
@slow
def _lowerCamelCase ( self ):
# only resize + normalize
__a : List[str] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
__a : List[Any] = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
_UpperCAmelCase )
__a : Union[str, Any] = prepare_img()
__a : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
__a : Optional[Any] = encoded_inputs.pixel_values.to(_UpperCAmelCase )
with torch.no_grad():
__a : Dict = model(_UpperCAmelCase )
__a : int = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__a : List[str] = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def _lowerCamelCase ( self ):
# only resize + normalize
__a : Optional[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
__a : str = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(_UpperCAmelCase )
__a : List[str] = prepare_img()
__a : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
__a : Union[str, Any] = encoded_inputs.pixel_values.to(_UpperCAmelCase )
with torch.no_grad():
__a : str = model(_UpperCAmelCase )
__a : Optional[int] = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__a : Optional[Any] = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-1 ) )
@slow
def _lowerCamelCase ( self ):
# only resize + normalize
__a : Dict = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
__a : Tuple = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
_UpperCAmelCase )
__a : List[str] = prepare_img()
__a : List[str] = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
__a : Union[str, Any] = encoded_inputs.pixel_values.to(_UpperCAmelCase )
with torch.no_grad():
__a : Tuple = model(_UpperCAmelCase )
__a : int = outputs.logits.detach().cpu()
__a : List[Any] = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(500, 300)] )
__a : str = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__a : int = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__a : Optional[int] = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
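# A minimal sketch of the inference pattern exercised by the slow tests above
# (the checkpoint id comes from the tests; the image path is a placeholder):
#
#     from PIL import Image
#     from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation
#
#     processor = SegformerImageProcessor()
#     model = SegformerForSemanticSegmentation.from_pretrained(
#         "nvidia/segformer-b0-finetuned-ade-512-512"
#     )
#     image = Image.open("path/to/image.png")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)  # logits shape: (1, num_labels, H/4, W/4)
#     segmentation = processor.post_process_semantic_segmentation(
#         outputs, target_sizes=[image.size[::-1]]
#     )[0]  # per-pixel class ids at the original resolution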
| 52 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
A = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label).

    Produces Transformer inputs of shape (n_batch, 2, input_len) where, for each choice:
    input_ids[i, choice, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
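# Shape sketch (derived from the function above): for n stories with two
# candidate endings each, every split yields a tuple of tensors
#   input_ids    (n, 2, input_len)  zero-padded token ids per choice
#   mc_token_ids (n, 2)             position of the _classify_ token per choice
#   lm_labels    (n, 2, input_len)  same ids with -100 on padding (ignored by the LM loss)
#   mc_labels    (n,)               index of the correct ending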
def __A ( ) -> Union[str, Any]:
__a : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=a_ , default='''openai-gpt''' , help='''pretrained model name''')
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''')
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''')
parser.add_argument(
'''--output_dir''' , default=a_ , type=a_ , required=a_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=a_ , default='''''')
parser.add_argument('''--eval_dataset''' , type=a_ , default='''''')
parser.add_argument('''--seed''' , type=a_ , default=42)
parser.add_argument('''--num_train_epochs''' , type=a_ , default=3)
parser.add_argument('''--train_batch_size''' , type=a_ , default=8)
parser.add_argument('''--eval_batch_size''' , type=a_ , default=16)
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=a_ , help='''Epsilon for Adam optimizer.''')
parser.add_argument('''--max_grad_norm''' , type=a_ , default=1)
parser.add_argument(
'''--max_steps''' , default=-1 , type=a_ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=a_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=a_ , default=6.25e-5)
parser.add_argument('''--warmup_steps''' , default=0 , type=a_ , help='''Linear warmup over warmup_steps.''')
parser.add_argument('''--lr_schedule''' , type=a_ , default='''warmup_linear''')
parser.add_argument('''--weight_decay''' , type=a_ , default=0.0_1)
parser.add_argument('''--lm_coef''' , type=a_ , default=0.9)
parser.add_argument('''--n_valid''' , type=a_ , default=3_74)
parser.add_argument('''--server_ip''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
parser.add_argument('''--server_port''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
__a : str = parser.parse_args()
print(a_)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=a_)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
__a : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
__a : str = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(a_ , a_))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
    # Load tokenizer and model.
    # This loading function also adds new tokens and embeddings, called `special tokens`.
    # These new embeddings will be fine-tuned on the RocStories dataset.
__a : List[str] = ['''_start_''', '''_delimiter_''', '''_classify_''']
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name)
tokenizer.add_tokens(a_)
__a : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_)
__a : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
model.resize_token_embeddings(len(a_))
model.to(a_)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('''Encoding dataset...''')
__a : Dict = load_rocstories_dataset(args.train_dataset)
__a : int = load_rocstories_dataset(args.eval_dataset)
__a : Optional[int] = (train_dataset, eval_dataset)
__a : List[Any] = tokenize_and_encode(a_)
# Compute the max input length for the Transformer
__a : List[Any] = model.config.n_positions // 2 - 2
__a : int = max(
len(story[:max_length]) + max(len(conta[:max_length]) , len(conta[:max_length])) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset)
__a : Union[str, Any] = min(a_ , model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__a : Tuple = pre_process_datasets(a_ , a_ , a_ , *a_)
__a , __a : Tuple = tensor_datasets[0], tensor_datasets[1]
__a : List[str] = TensorDataset(*a_)
__a : Optional[Any] = RandomSampler(a_)
__a : str = DataLoader(a_ , sampler=a_ , batch_size=args.train_batch_size)
__a : List[str] = TensorDataset(*a_)
__a : Optional[int] = SequentialSampler(a_)
__a : Optional[Any] = DataLoader(a_ , sampler=a_ , batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__a : int = args.max_steps
__a : Optional[int] = args.max_steps // (len(a_) // args.gradient_accumulation_steps) + 1
else:
__a : str = len(a_) // args.gradient_accumulation_steps * args.num_train_epochs
__a : List[Any] = list(model.named_parameters())
__a : Optional[int] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__a : List[str] = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], '''weight_decay''': 0.0},
]
__a : int = AdamW(a_ , lr=args.learning_rate , eps=args.adam_epsilon)
__a : Union[str, Any] = get_linear_schedule_with_warmup(
a_ , num_warmup_steps=args.warmup_steps , num_training_steps=a_)
if args.do_train:
__a , __a , __a : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs) , desc='''Epoch'''):
__a : Dict = 0
__a : Dict = 0
__a : List[str] = tqdm(a_ , desc='''Training''')
for step, batch in enumerate(a_):
__a : Dict = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : str = batch
__a : List[Any] = model(a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : Optional[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__a : int = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__a : Tuple = '''Training loss: {:.2e} lr: {:.2e}'''.format(a_ , scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__a : Dict = model.module if hasattr(a_ , '''module''') else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__a : int = os.path.join(args.output_dir , a_)
__a : str = os.path.join(args.output_dir , a_)
torch.save(model_to_save.state_dict() , a_)
model_to_save.config.to_json_file(a_)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
__a : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
model.to(a_)
if args.do_eval:
model.eval()
__a , __a : List[Any] = 0, 0
__a , __a : Union[str, Any] = 0, 0
for batch in tqdm(a_ , desc='''Evaluating'''):
__a : str = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : List[Any] = batch
with torch.no_grad():
__a , __a , __a , __a : str = model(
a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : List[str] = mc_logits.detach().cpu().numpy()
__a : Optional[Any] = mc_labels.to('''cpu''').numpy()
__a : str = accuracy(a_ , a_)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
__a : Tuple = eval_loss / nb_eval_steps
__a : List[str] = eval_accuracy / nb_eval_examples
__a : List[Any] = tr_loss / nb_tr_steps if args.do_train else None
__a : List[str] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__a : Dict = os.path.join(args.output_dir , '''eval_results.txt''')
with open(a_ , '''w''') as writer:
logger.info('''***** Eval results *****''')
for key in sorted(result.keys()):
logger.info(''' %s = %s''' , a_ , str(result[key]))
writer.write('''%s = %s\n''' % (key, str(result[key])))
if __name__ == "__main__":
main()
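# Example invocation (a hedged sketch: the script filename and the CSV paths
# are placeholders; the flags match the argparse definitions above):
#
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train \
#     --do_eval \
#     --train_dataset path/to/rocstories_val.csv \
#     --eval_dataset path/to/rocstories_test.csv \
#     --output_dir ./log \
#     --train_batch_size 16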
| 52 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-200-distilled-600M''': 1_024,
}
# fmt: off
A = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = []
__lowerCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , _UpperCAmelCase=None , _UpperCAmelCase=False , **_UpperCAmelCase , ):
# Mask token behave like a normal word, i.e. include the space before it
__a : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
__a : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__a : Any = legacy_behaviour
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_UpperCAmelCase , **_UpperCAmelCase , )
__a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
__a : Tuple = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
__a : int = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__a : Dict = 1
__a : Optional[Any] = len(self.sp_model )
__a : Dict = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCAmelCase )
}
__a : Dict = {v: k for k, v in self.lang_code_to_id.items()}
__a : str = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__a : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__a : Union[str, Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__a : List[str] = src_lang if src_lang is not None else '''eng_Latn'''
__a : List[Any] = self.lang_code_to_id[self._src_lang]
__a : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
__a : Dict = self.__dict__.copy()
__a : Optional[Any] = None
__a : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _UpperCAmelCase ):
__a : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__a : Dict = {}
__a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _lowerCamelCase ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _lowerCamelCase ( self ):
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : Optional[int] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__a : List[Any] = [1] * len(self.prefix_tokens )
__a : List[str] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : Dict = [self.sep_token_id]
__a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a : List[str] = src_lang
__a : Dict = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
__a : Any = self.convert_tokens_to_ids(_UpperCAmelCase )
__a : int = tgt_lang_id
return inputs
def _lowerCamelCase ( self ):
__a : Tuple = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a : Tuple = self.sp_model.PieceToId(_UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCamelCase ( self , _UpperCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a : List[str] = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , '''wb''' ) as fi:
__a : int = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = "eng_Latn" , _UpperCAmelCase = None , _UpperCAmelCase = "fra_Latn" , **_UpperCAmelCase , ):
__a : Optional[int] = src_lang
__a : Tuple = tgt_lang
return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def _lowerCamelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : List[str] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__a : Any = []
__a : Dict = [self.eos_token_id, self.cur_lang_code]
else:
__a : Dict = [self.cur_lang_code]
__a : Optional[Any] = [self.eos_token_id]
def _lowerCamelCase ( self , _UpperCAmelCase ):
__a : Dict = self.lang_code_to_id[lang]
if self.legacy_behaviour:
__a : List[Any] = []
__a : str = [self.eos_token_id, self.cur_lang_code]
else:
__a : List[str] = [self.cur_lang_code]
__a : Union[str, Any] = [self.eos_token_id]
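# A minimal translation sketch using this tokenizer's language-code machinery
# (standard documented NLLB usage; requires downloading the checkpoint):
#
#     from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn"
#     )
#     model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
#     inputs = tokenizer("Hello world", return_tensors="pt")
#     generated = model.generate(
#         **inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"]
#     )
#     print(tokenizer.batch_decode(generated, skip_special_tokens=True))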
| 52 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=4 , ):
__a : Any = parent
__a : Optional[int] = batch_size
__a : str = seq_length
__a : List[str] = is_training
__a : Optional[Any] = use_attention_mask
__a : Optional[Any] = use_token_type_ids
__a : List[str] = use_labels
__a : Union[str, Any] = vocab_size
__a : int = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : Dict = intermediate_size
__a : List[str] = hidden_act
__a : Dict = hidden_dropout_prob
__a : Union[str, Any] = attention_probs_dropout_prob
__a : int = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Optional[int] = type_sequence_label_size
__a : Optional[Any] = initializer_range
__a : Optional[int] = num_choices
def _lowerCamelCase ( self ):
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Union[str, Any] = None
if self.use_attention_mask:
__a : Any = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[int] = None
if self.use_token_type_ids:
__a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a : Any = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowerCamelCase ( self ):
__a : Dict = self.prepare_config_and_inputs()
__a , __a , __a , __a : str = config_and_inputs
__a : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def _lowerCamelCase ( self ):
__a : Any = self.prepare_config_and_inputs()
__a , __a , __a , __a : Union[str, Any] = config_and_inputs
__a : Optional[int] = True
__a : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)
@slow
def _lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
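# The loading pattern the slow test relies on, spelled out (from_pt=True
# converts the PyTorch checkpoint to Flax on the fly; network access required):
#
#     import numpy as np
#     from transformers import FlaxRobertaModel
#
#     model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
#     outputs = model(np.ones((1, 1), dtype="i4"))
#     print(outputs.last_hidden_state.shape)  # (1, 1, 768)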
| 52 | 1 |
"""simple docstring"""
def count_set_bits(number: int) -> int:
    """Count the 1-bits of a non-negative integer (Brian Kernighan's algorithm)."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # number &= number - 1 clears the lowest set bit, so each iteration
        # jumps straight to the next 1 instead of scanning every bit position;
        # the loop runs once per set bit, not 32 times.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
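# Added sanity check (illustrative): the count should agree with Python's own
# binary representation for any non-negative integer.
if __name__ == "__main__":
    for n in (0, 1, 0b1011, 255, 2**31):
        assert count_set_bits(n) == bin(n).count("1")
    print(count_set_bits(0b1011))  # 3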
| 52 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # "Subsample" attention blocks that shrink the resolution between stages
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
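# Hedged usage sketch (added, not part of the module): LevitConfig and
# LevitModel are the public names in transformers; the defaults above mirror
# facebook/levit-128S. Running this needs torch installed.
#
#     from transformers import LevitConfig, LevitModel
#
#     config = LevitConfig()          # defaults as defined above
#     model = LevitModel(config)      # randomly initialised weights
#     print(config.hidden_sizes)      # [128, 256, 384]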
| 52 | 1 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # all remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    # Project Euler 58: walk the corners of an expanding number spiral (side
    # length j) until the share of primes on the diagonals first drops below
    # `ratio`, then return that side length.
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
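# Added sanity check (illustrative): the primality test on small numbers.
if __name__ == "__main__":
    print([n for n in range(2, 30) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]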
| 52 |
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # list "union": set_a plus the elements of set_b not already present
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
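# Added examples (illustrative): the list branch keeps duplicates and order,
# and alternative_union divides by len(a) + len(b) instead of the true union.
if __name__ == "__main__":
    print(jaccard_similarity(["a", "b"], ["b", "c"]))  # 0.333... (1 shared / 3 in union)
    print(jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"}, alternative_union=True))  # 3/11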
| 52 | 1 |
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    # the output is 1 only when neither input is 0
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
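# Added sketch (illustrative, not part of the original module): other gates can
# be composed from and_gate; nand_gate is a hypothetical helper shown here only
# to demonstrate the composition.
def nand_gate(input_1: int, input_2: int) -> int:
    return int(not and_gate(input_1, input_2))


if __name__ == "__main__":
    assert nand_gate(1, 1) == 0
    assert nand_gate(0, 1) == 1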
| 52 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        # store every edge with its endpoints in sorted order so (a, b) == (b, a)
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        # grow a minimum spanning tree outward from an arbitrary start vertex
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # XOR keeps only edges crossing the cut between tree and non-tree vertices
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data: list[str] = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
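# Added mini-demo (illustrative): on a weighted triangle, Prim's algorithm
# keeps the two cheapest edges, so the spanning tree weighs 1 + 2 = 3.
if __name__ == "__main__":
    demo = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    print(sum(demo.prims_algorithm().edges.values()))  # 3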
| 52 | 1 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
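# Hedged usage sketch (added, not part of the module): this class backs the
# "feature-extraction" task of the high-level pipeline API; the model name and
# printed sizes are illustrative.
#
#     from transformers import pipeline
#
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("Hello world")           # nested lists: [batch][token][hidden]
#     print(len(features[0]), len(features[0][0]))  # e.g. 4 768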
| 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
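# Hedged usage sketch (added): this config parameterises the text decoder that
# VisionEncoderDecoderModel pairs with an image encoder for OCR; the toy
# dimensions below are illustrative, not a recommended setup.
#
#     from transformers import TrOCRConfig, TrOCRForCausalLM
#
#     config = TrOCRConfig(d_model=256, decoder_layers=2, decoder_attention_heads=4, decoder_ffn_dim=1024)
#     decoder = TrOCRForCausalLM(config)  # small randomly initialised decoder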
| 52 | 1 |
"""simple docstring"""
import os
def solution() -> int:
    # Project Euler 11: greatest product of four adjacent numbers in the 20x20
    # grid, in any direction (right, down, or either diagonal).
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
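# Added toy check (illustrative): the same row scan on a 4x4 grid; the real
# input lives in grid.txt next to this script.
if __name__ == "__main__":
    toy = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    print(max(r[0] * r[1] * r[2] * r[3] for r in toy))  # 43680, the bottom row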
| 52 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        # SentencePiece marks word boundaries with U+2581; map it back to a space
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
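# Hedged alternative (added): the same task through the high-level pipeline API;
# downloads camembert-base on first use.
#
#     from transformers import pipeline
#
#     camembert_fill_mask = pipeline("fill-mask", model="camembert-base")
#     print(camembert_fill_mask("Le camembert est <mask> :)")[0]["token_str"])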
| 52 | 1 |