| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Union[str, Any] = MgpstrTokenizer
lowerCamelCase : Tuple = False
lowerCamelCase : str = {}
lowerCamelCase : Dict = False
def lowercase__ ( self : Dict ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
SCREAMING_SNAKE_CASE__ : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowercase ) + '''\n''' )
def lowercase__ ( self : Any , **_lowercase : List[Any] ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : int , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : Tuple = '''tester'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def lowercase__ ( self : Dict ):
pass
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE__ : List[str] = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode([special_token] , add_special_tokens=_lowercase )
self.assertEqual(len(_lowercase ) , 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
self.assertTrue(special_token not in decoded )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_input_output_texts(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.convert_tokens_to_ids(_lowercase )
SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : int = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertNotEqual(len(_lowercase ) , 0 )
SCREAMING_SNAKE_CASE__ : str = tokenizer.decode(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _lowercase )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def lowercase__ ( self : Optional[int] ):
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def lowercase__ ( self : int ):
pass
| code_codestyle: 35 |
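For readability, here is an un-obfuscated sketch of what the tokenizer fixture above does: build a character-level vocabulary, serialize it as `vocab.json`, and reload the directory with `MgpstrTokenizer.from_pretrained`. The helper names and temp-directory handling are mine; only the `transformers` call at the end is the API the sample actually exercises.

```python
import json
import os
import string
import tempfile

# Rebuild the fixture vocabulary: two special tokens, then digits and lowercase letters.
vocab = ["[GO]", "[s]"] + list(string.digits) + list(string.ascii_lowercase)
vocab_dict = {token: idx for idx, token in enumerate(vocab)}

tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, "vocab.json")  # i.e. VOCAB_FILES_NAMES["vocab_file"]
with open(vocab_file, "w", encoding="utf-8") as fp:
    fp.write(json.dumps(vocab_dict) + "\n")

# With transformers installed, the directory then loads directly:
# tokenizer = MgpstrTokenizer.from_pretrained(tmpdir)
```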
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase : Any = ['''accelerate''', '''launch''']
lowerCamelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase : Optional[int] = '''default_config.yaml'''
lowerCamelCase : Optional[Any] = config_folder / config_file
lowerCamelCase : Optional[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase : Optional[Any] = Path('''tests/test_configs''' )
@classmethod
def lowercase__ ( cls : Any ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Tuple ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
lowerCamelCase : str = '''test-tpu'''
lowerCamelCase : Tuple = '''us-central1-a'''
lowerCamelCase : Optional[int] = '''ls'''
lowerCamelCase : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase : Tuple = '''cd /usr/share'''
lowerCamelCase : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase : Any = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
| style_context_codestyle: 35 | label: 1 |
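Every test in the style context above follows one pattern: run the `accelerate` executable in a subprocess with `--debug` (which prints the would-be `gcloud` invocation instead of executing it) and assert on the captured stdout. A minimal standalone sketch of that pattern, assuming `accelerate` is installed and on PATH:

```python
import subprocess

result = subprocess.run(
    ["accelerate", "tpu-config", "--debug",
     "--command", "ls", "--tpu_zone", "us-central1-a", "--tpu_name", "test-tpu"],
    capture_output=True, text=True, check=True,
)
assert "Running gcloud compute tpus tpu-vm ssh" in result.stdout
```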
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ :Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ :Dict = 2_56
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[Any] = ['''melgan''']
def __init__( self : str , _lowercase : SpectrogramNotesEncoder , _lowercase : SpectrogramContEncoder , _lowercase : TaFilmDecoder , _lowercase : DDPMScheduler , _lowercase : OnnxRuntimeModel if is_onnx_available() else Any , ):
super().__init__()
# From MELGAN
SCREAMING_SNAKE_CASE__ : List[str] = math.log(1E-5 ) # Matches MelGAN training.
SCREAMING_SNAKE_CASE__ : str = 4.0 # Largest value for most examples
SCREAMING_SNAKE_CASE__ : str = 1_28
self.register_modules(
notes_encoder=_lowercase , continuous_encoder=_lowercase , decoder=_lowercase , scheduler=_lowercase , melgan=_lowercase , )
def lowercase__ ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Optional[int]=(-1.0, 1.0) , _lowercase : Dict=False ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = output_range
if clip:
SCREAMING_SNAKE_CASE__ : Tuple = torch.clip(_lowercase , self.min_value , self.max_value )
# Scale to [0, 1].
SCREAMING_SNAKE_CASE__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def lowercase__ ( self : Optional[Any] , _lowercase : str , _lowercase : Any=(-1.0, 1.0) , _lowercase : Tuple=False ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = input_range
SCREAMING_SNAKE_CASE__ : str = torch.clip(_lowercase , _lowercase , _lowercase ) if clip else outputs
# Scale to [0, 1].
SCREAMING_SNAKE_CASE__ : int = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def lowercase__ ( self : str , _lowercase : Optional[int] , _lowercase : List[str] , _lowercase : List[Any] ):
SCREAMING_SNAKE_CASE__ : str = input_tokens > 0
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.notes_encoder(
encoder_input_tokens=_lowercase , encoder_inputs_mask=_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.continuous_encoder(
encoder_inputs=_lowercase , encoder_inputs_mask=_lowercase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] , _lowercase : Tuple , _lowercase : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Dict = noise_time
if not torch.is_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_lowercase ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE__ : Any = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
SCREAMING_SNAKE_CASE__ : Any = self.decoder(
encodings_and_masks=_lowercase , decoder_input_tokens=_lowercase , decoder_noise_time=_lowercase )
return logits
@torch.no_grad()
def __call__( self : Any , _lowercase : List[List[int]] , _lowercase : Optional[torch.Generator] = None , _lowercase : int = 1_00 , _lowercase : bool = True , _lowercase : str = "numpy" , _lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowercase : int = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_lowercase )}.""" )
SCREAMING_SNAKE_CASE__ : Tuple = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ : Tuple = np.zeros([1, 0, self.n_dims] , np.floataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_lowercase , device=self.device )
for i, encoder_input_tokens in enumerate(_lowercase ):
if i == 0:
SCREAMING_SNAKE_CASE__ : Dict = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
SCREAMING_SNAKE_CASE__ : Tuple = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_lowercase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
SCREAMING_SNAKE_CASE__ : Dict = ones
SCREAMING_SNAKE_CASE__ : Dict = self.scale_features(
_lowercase , output_range=[-1.0, 1.0] , clip=_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_lowercase , continuous_mask=_lowercase , )
# Sample Gaussian noise with the shape of encoder_continuous_inputs to begin the loop
SCREAMING_SNAKE_CASE__ : List[Any] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_lowercase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_lowercase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE__ : int = self.decode(
encodings_and_masks=_lowercase , input_tokens=_lowercase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
SCREAMING_SNAKE_CASE__ : Any = self.scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
SCREAMING_SNAKE_CASE__ : List[Any] = self.scale_to_features(_lowercase , input_range=[-1.0, 1.0] )
SCREAMING_SNAKE_CASE__ : List[str] = mel[:1]
SCREAMING_SNAKE_CASE__ : List[str] = mel.cpu().float().numpy()
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowercase , _lowercase )
logger.info('''Generated segment''' , _lowercase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
SCREAMING_SNAKE_CASE__ : List[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
SCREAMING_SNAKE_CASE__ : Dict = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_lowercase )
| code_codestyle: 35 |
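The two scaling helpers in the pipeline above are a plain affine min-max rescale and its inverse, mapping between the MelGAN log-amplitude range and a target output range. A standalone sketch with explicit names (the constants mirror the `__init__` above; the function names are mine):

```python
import math

import torch

MIN_VALUE = math.log(1e-5)  # matches MelGAN training
MAX_VALUE = 4.0             # largest value for most examples

def scale_features(features, output_range=(-1.0, 1.0), clip=False):
    min_out, max_out = output_range
    if clip:
        features = torch.clip(features, MIN_VALUE, MAX_VALUE)
    zero_one = (features - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)  # -> [0, 1]
    return zero_one * (max_out - min_out) + min_out              # -> output_range

def scale_to_features(outputs, input_range=(-1.0, 1.0), clip=False):
    min_out, max_out = input_range
    if clip:
        outputs = torch.clip(outputs, min_out, max_out)
    zero_one = (outputs - min_out) / (max_out - min_out)
    return zero_one * (MAX_VALUE - MIN_VALUE) + MIN_VALUE

x = torch.tensor([MIN_VALUE, MAX_VALUE])
assert torch.allclose(scale_to_features(scale_features(x)), x)  # round trip
```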
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ :List[str] = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| style_context_codestyle: 35 | label: 1 |
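This is transformers' standard lazy-import module layout: `_import_structure` declares the exports, the `TYPE_CHECKING` branch imports eagerly for static analysis, and at runtime the module is swapped for a `_LazyModule` that resolves attributes on first access. The mechanism boils down to PEP 562 module `__getattr__`; a self-contained miniature using stdlib modules as stand-ins:

```python
import importlib

# Stand-in export table; the real file maps submodule names to symbol lists.
_import_structure = {"json": ["dumps", "loads"], "math": ["sqrt"]}

def __getattr__(name):  # PEP 562: called when a module attribute is missing
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# Another module doing `import lazy_demo; lazy_demo.dumps(...)` pays the
# cost of importing json only on that first attribute access.
```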
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a_ :Optional[Any] = 16
a_ :Dict = 32
def a ( A__ ) -> Dict:
'''simple docstring'''
return int(x / 2**2_0 )
class lowercase :
def __enter__( self : List[str] ):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.memory_allocated()
return self
def __exit__( self : Union[str, Any] , *_lowercase : Tuple ):
gc.collect()
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.memory_allocated()
SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bamb(self.end - self.begin )
SCREAMING_SNAKE_CASE__ : str = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def a ( A__ , A__ = 1_6 , A__ = "bert-base-cased" , A__ = 3_2_0 , A__ = 1_6_0 , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = AutoTokenizer.from_pretrained(A__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = load_dataset(
'''glue''' , '''mrpc''' , split={'''train''': f"""train[:{n_train}]""", '''validation''': f"""validation[:{n_val}]"""} )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE__ : str = datasets.map(
A__ , batched=A__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels', the column name the transformers models expect
SCREAMING_SNAKE_CASE__ : List[str] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return tokenizer.pad(A__ , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DataLoader(
tokenized_datasets['''train'''] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def a ( A__ , A__ ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ : int = config['''lr''']
SCREAMING_SNAKE_CASE__ : Optional[int] = int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE__ : List[str] = int(config['''seed'''] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE__ : Tuple = args.model_name_or_path
set_seed(A__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = get_dataloaders(A__ , A__ , A__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also controls new-weight initialization)
SCREAMING_SNAKE_CASE__ : str = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
SCREAMING_SNAKE_CASE__ : Dict = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
SCREAMING_SNAKE_CASE__ : Tuple = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
SCREAMING_SNAKE_CASE__ : List[str] = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE__ : Any = 0
# We also need to keep track of the starting epoch so files are named properly
SCREAMING_SNAKE_CASE__ : int = 0
# Now we train the model
SCREAMING_SNAKE_CASE__ : Dict = {}
for epoch in range(A__ , A__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(A__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = model(**A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.loss
SCREAMING_SNAKE_CASE__ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
accelerator.print(
'''Total Peak Memory consumed during the train (max): {}'''.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
SCREAMING_SNAKE_CASE__ : Dict = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f:
json.dump(A__ , A__ )
def a ( ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=A__ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=A__ , )
parser.add_argument(
'''--output_dir''' , type=A__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--peak_memory_upper_bound''' , type=A__ , default=A__ , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , )
parser.add_argument(
'''--n_train''' , type=A__ , default=3_2_0 , help='''Number of training examples to use.''' , )
parser.add_argument(
'''--n_val''' , type=A__ , default=1_6_0 , help='''Number of validation examples to use.''' , )
parser.add_argument(
'''--num_epochs''' , type=A__ , default=1 , help='''Number of train epochs.''' , )
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Dict = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| code_codestyle: 35 |
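Deobfuscated, the memory tracker at the top of this script reads as below. `TorchTracemalloc` is the name the script itself uses at its `with` statement; `b2mb` (bytes to mebibytes) is my guess at the obfuscated helper's original name. Running it requires a CUDA device.

```python
import gc

import torch

def b2mb(x):
    # bytes -> whole mebibytes
    return int(x / 2**20)

class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
```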
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Optional[int] ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._create_example_records()
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(_lowercase ):
self.assertDictEqual(_lowercase , example_records[i] )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Optional[int] = Dataset.from_list(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : List[Any] ): # checks what happens with missing columns
SCREAMING_SNAKE_CASE__ : List[str] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_lowercase )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def lowercase__ ( self : int ): # checks if the type can be inferred from the second record
SCREAMING_SNAKE_CASE__ : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list([] )
self.assertEqual(len(_lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| style_context_codestyle: 35 | label: 1 |
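The behaviors those `Dataset.from_list` tests pin down are easy to check interactively; with the `datasets` library installed:

```python
from datasets import Dataset, Sequence, Value

dset = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
assert dset.column_names == ["col_1", "col_2"]

# Missing columns: the first record fixes the schema, missing values become None.
dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
assert dset[1] == {"col_1": None}

# Type inference looks past an empty list to the second record.
dset = Dataset.from_list([{"col_1": []}, {"col_1": [1, 2]}])
assert dset.features["col_1"] == Sequence(Value("int64"))
```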
class lowercase :
def __init__( self : int , _lowercase : Dict ):
# the input is a comma-separated string; split it into a list of number strings
SCREAMING_SNAKE_CASE__ : int = arr.split(''',''' )
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Any = [int(self.array[0] )] * len(self.array )
SCREAMING_SNAKE_CASE__ : Dict = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
SCREAMING_SNAKE_CASE__ : str = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
SCREAMING_SNAKE_CASE__ : Any = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
a_ :Tuple = input('please input some numbers:')
a_ :Optional[Any] = SubArray(whole_array)
a_ :Any = array.solve_sub_array()
print(('the results is:', re))
| code_codestyle: 35 |
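The `SubArray` class above solves the maximum-subarray problem in O(n) time with O(n) extra space for its two bookkeeping lists. The same recurrence in its usual constant-space form (Kadane's algorithm), as a sketch:

```python
def max_subarray_sum(nums):
    best = current = nums[0]
    for x in nums[1:]:
        current = max(x, current + x)  # extend the current run or restart at x
        best = max(best, current)
    return best

assert max_subarray_sum([1, -2, 4, -1, 5]) == 8
```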
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : Optional[int] , _lowercase : str=0.2 , _lowercase : str=0.2 ):
SCREAMING_SNAKE_CASE__ : List[Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : List[str] = conva_get[:2]
SCREAMING_SNAKE_CASE__ : str = conva_get[2]
SCREAMING_SNAKE_CASE__ : Any = size_pa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rate_w
SCREAMING_SNAKE_CASE__ : Tuple = rate_t
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE__ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE__ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.num_bpa ) + 1
def lowercase__ ( self : Union[str, Any] , _lowercase : Any ):
# save model dict with pickle
SCREAMING_SNAKE_CASE__ : Dict = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_lowercase , '''wb''' ) as f:
pickle.dump(_lowercase , _lowercase )
print(f"""Model saved: {save_path}""" )
@classmethod
def lowercase__ ( cls : Dict , _lowercase : int ):
# read saved model
with open(_lowercase , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = pickle.load(_lowercase ) # noqa: S301
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''size_pooling1''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''num_bp1''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp2''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp3''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''rate_weight''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''rate_thre''' )
# create model instance
SCREAMING_SNAKE_CASE__ : Dict = CNN(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# modify model parameter
SCREAMING_SNAKE_CASE__ : List[str] = model_dic.get('''w_conv1''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''wkj''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''vji''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''thre_conv1''' )
SCREAMING_SNAKE_CASE__ : Any = model_dic.get('''thre_bp2''' )
SCREAMING_SNAKE_CASE__ : List[Any] = model_dic.get('''thre_bp3''' )
return conv_ins
def lowercase__ ( self : str , _lowercase : Optional[int] ):
return 1 / (1 + np.exp(-1 * x ))
def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] ):
return round(_lowercase , 3 )
def lowercase__ ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ):
# convolution process
SCREAMING_SNAKE_CASE__ : Tuple = convs[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = convs[1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.shape(_lowercase )[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_lowercase )
# calculate the feature map of every single kernel, and save them as a list of matrices
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(_lowercase ).reshape(
_lowercase , _lowercase )
data_featuremap.append(_lowercase )
# expand the data slices to one dimension
SCREAMING_SNAKE_CASE__ : int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asarray(_lowercase )
return focus_list, data_featuremap
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any]="average_pool" ):
# pooling process
SCREAMING_SNAKE_CASE__ : List[str] = len(featuremaps[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = int(size_map / size_pooling )
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_map in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Any = featuremaps[i_map]
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(0 , _lowercase , _lowercase ):
for j_focus in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asmatrix(_lowercase ).reshape(_lowercase , _lowercase )
featuremap_pooled.append(_lowercase )
return featuremap_pooled
def lowercase__ ( self : Optional[Any] , _lowercase : Optional[Any] ):
# expand three-dimensional data into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Dict = []
for i in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.shape(data[i] )
SCREAMING_SNAKE_CASE__ : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE__ : Dict = data_listed.getA().tolist()[0]
data_expanded.extend(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(_lowercase )
return data_expanded
def lowercase__ ( self : Tuple , _lowercase : Optional[int] ):
# expand a matrix into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.asarray(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : str = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowercase__ ( self : List[str] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Dict = 0
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : Any = np.ones((size_map, size_map) )
for i in range(0 , _lowercase , _lowercase ):
for j in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE__ : Dict = i_pool + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.multiply(
_lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowercase )
return pd_all
def lowercase__ ( self : List[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Tuple , _lowercase : int=bool ):
# model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_lowercase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_lowercase )) )
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(_lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE__ : Any = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE__ : str = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : int = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Any = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sig(_lowercase )
# --------------Model Learning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE__ : Tuple = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.multiply(
np.dot(_lowercase , self.wkj ) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(_lowercase , self.vji )
SCREAMING_SNAKE_CASE__ : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE__ : List[str] = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._calculate_gradient_from_pool(
_lowercase , _lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE__ : Dict = self.rate_weight * np.dot(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
SCREAMING_SNAKE_CASE__ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the summed error of this single image
SCREAMING_SNAKE_CASE__ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = rp + 1
SCREAMING_SNAKE_CASE__ : List[str] = error_count / patterns
all_mse.append(_lowercase )
def draw_error():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowercase , '''+-''' )
plt.plot(_lowercase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_lowercase , alpha=0.5 )
plt.show()
print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def lowercase__ ( self : Union[str, Any] , _lowercase : int ):
# model predict
SCREAMING_SNAKE_CASE__ : Dict = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_lowercase )) )
for p in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Any = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Tuple = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.sig(_lowercase )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE__ : str = [list(map(self.do_round , _lowercase ) ) for each in produce_out]
return np.asarray(_lowercase )
def lowercase__ ( self : Optional[int] , _lowercase : Tuple ):
# return the image data after the convolution and pooling steps so it can be inspected
SCREAMING_SNAKE_CASE__ : str = np.asmatrix(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Dict = self.pooling(_lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| style_context_codestyle: 35 | label: 1 |
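Once the obfuscation is stripped away, several methods of the CNN above reduce to a few NumPy lines. The average-pooling step over non-overlapping windows, for instance (function name and test values are mine):

```python
import numpy as np

def average_pool(feature_map, size):
    n = feature_map.shape[0] // size
    blocks = feature_map[: n * size, : n * size].reshape(n, size, n, size)
    return blocks.mean(axis=(1, 3))  # mean over each size x size window

fm = np.arange(16, dtype=float).reshape(4, 4)
print(average_pool(fm, 2))  # [[ 2.5  4.5]
                            #  [10.5 12.5]]
```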
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Optional[int] = CycleDiffusionPipeline
lowerCamelCase : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
lowerCamelCase : Tuple = PipelineTesterMixin.required_optional_params - {'''latents'''}
lowerCamelCase : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
lowerCamelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self : Optional[int] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE__ : int = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
SCREAMING_SNAKE_CASE__ : int = CLIPTextModel(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase__ ( self : int , _lowercase : List[str] , _lowercase : int=0 ):
SCREAMING_SNAKE_CASE__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : int = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : List[Any] = torch.manual_seed(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : str = CycleDiffusionPipeline(**_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = output.images
SCREAMING_SNAKE_CASE__ : int = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : str = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
SCREAMING_SNAKE_CASE__ : str = module.half()
SCREAMING_SNAKE_CASE__ : str = CycleDiffusionPipeline(**_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self.get_dummy_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = pipe(**_lowercase )
SCREAMING_SNAKE_CASE__ : str = output.images
SCREAMING_SNAKE_CASE__ : List[str] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowercase__ ( self : List[Any] ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def lowercase__ ( self : str ):
return super().test_inference_batch_single_identical()
@skip_mps
def lowercase__ ( self : Union[str, Any] ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowercase__ ( self : Optional[Any] ):
return super().test_save_load_optional_components()
@skip_mps
def lowercase__ ( self : Optional[Any] ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
SCREAMING_SNAKE_CASE__ : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = init_image.resize((5_12, 5_12) )
SCREAMING_SNAKE_CASE__ : List[Any] = '''CompVis/stable-diffusion-v1-4'''
SCREAMING_SNAKE_CASE__ : List[str] = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE__ : Tuple = CycleDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : List[str] = '''A black colored car'''
SCREAMING_SNAKE_CASE__ : Optional[int] = '''A blue colored car'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
SCREAMING_SNAKE_CASE__ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = init_image.resize((5_12, 5_12) )
SCREAMING_SNAKE_CASE__ : int = '''CompVis/stable-diffusion-v1-4'''
SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE__ : str = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Optional[int] = '''A black colored car'''
SCREAMING_SNAKE_CASE__ : int = '''A blue colored car'''
SCREAMING_SNAKE_CASE__ : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : int = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| code_codestyle: 35 |
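The reproducibility these pipeline tests depend on comes from the seeded `torch.Generator` built in `get_dummy_inputs`: the same seed yields bit-identical noise, so image slices can be compared against hard-coded values. A minimal illustration:

```python
import torch

def seeded_noise(seed, device="cpu"):
    generator = torch.Generator(device=device).manual_seed(seed)
    return torch.randn(1, 4, 8, 8, generator=generator)

assert torch.equal(seeded_noise(0), seeded_noise(0))      # same seed, same noise
assert not torch.equal(seeded_noise(0), seeded_noise(1))  # different seed differs
```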
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase :
def __init__( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=99 , _lowercase : Optional[int]=13 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : str=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , _lowercase : Any=32 , _lowercase : int=4 , _lowercase : Dict=4 , _lowercase : Dict=30 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : str = pad_token_id
SCREAMING_SNAKE_CASE__ : str = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : int = decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase__ ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
SCREAMING_SNAKE_CASE__ : int = outputs['''past_key_values''']
# create a hypothetical next token and extend next_input_ids with it
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append the new tokens to input_ids
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , past_key_values=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Tuple = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase : Any = True
lowerCamelCase : int = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : Tuple ):
pass
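# Hedged sketch of the cache property the decoder test above asserts: decoding one
# new token with `past_key_values` must match re-running the full sequence. A tiny
# public GPT-2 checkpoint stands in for the decoder (assumes hub access; this is
# an illustration, not the TrOCR model under test).
import torch
from transformers import AutoModelForCausalLM
_model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).eval()
_input_ids = torch.tensor([[1, 2, 3, 4]] )
with torch.no_grad():
    _past = _model(_input_ids[:, :-1] , use_cache=True ).past_key_values
    _from_past = _model(_input_ids[:, -1:] , past_key_values=_past ).logits
    _from_full = _model(_input_ids ).logits[:, -1:, :]
assert torch.allclose(_from_past , _from_full , atol=1E-3 )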
| 35 | 1 |
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__( self , initial_capacity: int = 6 ) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity )
    def create_linked_list( self , initial_capacity: int ) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self ) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first( self ):
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue( self , data: Any ) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self ):
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self ) -> None:
        if self.is_empty():
            raise Exception('''Empty Queue''' )
    def check_is_full( self ) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('''Full Queue''' )
class Node:
    def __init__( self ) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
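# Hedged usage sketch for the circular queue above (class and method names as
# reconstructed here); a capacity-3 ring behaves FIFO until it wraps:
_q = CircularQueueLinkedList(initial_capacity=3 )
_q.enqueue('''a''' )
_q.enqueue('''b''' )
assert _q.first() == '''a'''
assert _q.dequeue() == '''a'''
assert _q.dequeue() == '''b'''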
| 35 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = LayoutLMTokenizer
lowerCamelCase : Any = LayoutLMTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : List[Any] = True
def lowercase__ ( self : Optional[int] ):
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Optional[int] , **_lowercase : str ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : str = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ : Any = '''unwanted, running'''
return input_text, output_text
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_lowercase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )
def lowercase__ ( self : str ):
pass
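# A minimal greedy longest-match WordPiece sketch (simplified: the real
# BertTokenizer also lower-cases and splits on punctuation first), showing why
# "unwanted" maps to ["un", "##want", "##ed"] under a vocab like the one above:
def _wordpiece(word , vocab ):
    pieces , start = [] , 0
    while start < len(word ):
        end = len(word )
        while end > start:
            piece = word[start:end] if start == 0 else '''##''' + word[start:end]
            if piece in vocab:
                pieces.append(piece )
                break
            end -= 1
        else:
            return ['''[UNK]''']
        start = end
    return pieces
assert _wordpiece('''unwanted''' , {'''un''', '''##want''', '''##ed'''} ) == ['''un''', '''##want''', '''##ed''']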
| 35 | 1 |
from __future__ import annotations
def check_polygon( nums: list[float] ) -> bool:
    '''simple docstring'''
    if len(nums ) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    if any(i <= 0 for i in nums ):
        raise ValueError('''All values must be greater than 0''' )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
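# Hedged usage sketch for the polygon-inequality check above: the largest side
# must be strictly shorter than the sum of the rest.
assert check_polygon([6, 10, 5] )          # 10 < 6 + 5 + ... is False here: 10 < 11, valid
assert not check_polygon([3, 7, 13, 2] )   # 13 >= 3 + 7 + 2, degenerate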
| 35 |
from __future__ import annotations
def ohms_law( voltage: float , current: float , resistance: float ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
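# Hedged usage sketch (function name `ohms_law` as restored above): the zero
# argument is the one being solved for.
assert ohms_law(voltage=10 , current=5 , resistance=0 ) == {'''resistance''': 2.0}
assert ohms_law(voltage=0 , current=2 , resistance=4 ) == {'''voltage''': 8.0}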
| 35 | 1 |
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats( url: str = "https://www.worldometers.info/coronavirus" ) -> dict:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    keys = soup.findAll('''h1''' )
    values = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
    keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
    values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(F'''{key}\n{value}\n''')
| 35 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
a_ :List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
a_ :Tuple = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
a_ :str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ :List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a_ :Optional[int] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ :Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
f"""There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
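# Hedged usage sketch for the reader tokenizer defined above (assumes hub
# access; the checkpoint is one of those listed in READER_PRETRAINED_VOCAB_FILES_MAP):
from transformers import DPRReaderTokenizer
_tok = DPRReaderTokenizer.from_pretrained('''facebook/dpr-reader-single-nq-base''' )
_enc = _tok(
    questions=['''What is love ?'''] ,
    titles=['''Haddaway'''] ,
    texts=['''What Is Love is a song recorded by the artist Haddaway'''] ,
    return_tensors='''pt''' , )
print(_enc['''input_ids'''].shape )  # (n_passages, sequence_length)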
| 35 | 1 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowercase ( unittest.TestCase ):
lowerCamelCase : Any = MODEL_FOR_CAUSAL_LM_MAPPING
lowerCamelCase : Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE__ : List[str] = text_generator('''This is a test''' , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
SCREAMING_SNAKE_CASE__ : List[Any] = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_lowercase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
SCREAMING_SNAKE_CASE__ : List[str] = text_generator('''This is a test''' , do_sample=_lowercase , num_return_sequences=2 , return_tensors=_lowercase )
self.assertEqual(
_lowercase , [
{'''generated_token_ids''': ANY(_lowercase )},
{'''generated_token_ids''': ANY(_lowercase )},
] , )
SCREAMING_SNAKE_CASE__ : Optional[int] = text_generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = '''<pad>'''
SCREAMING_SNAKE_CASE__ : Tuple = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=_lowercase , )
self.assertEqual(
_lowercase , [
[
{'''generated_token_ids''': ANY(_lowercase )},
{'''generated_token_ids''': ANY(_lowercase )},
],
[
{'''generated_token_ids''': ANY(_lowercase )},
{'''generated_token_ids''': ANY(_lowercase )},
],
] , )
@require_tf
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE__ : Optional[int] = text_generator('''This is a test''' , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
SCREAMING_SNAKE_CASE__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def lowercase__ ( self : Dict , _lowercase : int , _lowercase : Tuple , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Tuple = TextGenerationPipeline(model=_lowercase , tokenizer=_lowercase )
return text_generator, ["This is a test", "Another test"]
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''Hello I believe in'''
SCREAMING_SNAKE_CASE__ : int = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_generator(_lowercase )
self.assertEqual(
_lowercase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
SCREAMING_SNAKE_CASE__ : str = text_generator(_lowercase , stop_sequence=''' fe''' )
self.assertEqual(_lowercase , [{'''generated_text''': '''Hello I believe in fe'''}] )
def lowercase__ ( self : Union[str, Any] , _lowercase : Optional[int] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = text_generator.model
SCREAMING_SNAKE_CASE__ : Tuple = text_generator.tokenizer
SCREAMING_SNAKE_CASE__ : Any = text_generator('''This is a test''' )
self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_generator('''This is a test''' , return_full_text=_lowercase )
self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
SCREAMING_SNAKE_CASE__ : Tuple = pipeline(task='''text-generation''' , model=_lowercase , tokenizer=_lowercase , return_full_text=_lowercase )
SCREAMING_SNAKE_CASE__ : int = text_generator('''This is a test''' )
self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_generator('''This is a test''' , return_full_text=_lowercase )
self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
[{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}],
[{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
[{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}],
[{'''generated_text''': ANY(_lowercase )}, {'''generated_text''': ANY(_lowercase )}],
] , )
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] = text_generator('''test''' , return_full_text=_lowercase , return_text=_lowercase )
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = text_generator('''test''' , return_full_text=_lowercase , return_tensors=_lowercase )
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] = text_generator('''test''' , return_text=_lowercase , return_tensors=_lowercase )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_generator('''''' )
self.assertEqual(_lowercase , [{'''generated_text''': ANY(_lowercase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
SCREAMING_SNAKE_CASE__ : int = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 5_00 , max_new_tokens=20 )
SCREAMING_SNAKE_CASE__ : Any = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_lowercase ):
text_generator(
'''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowercase__ ( self : str ):
import torch
# Classic `model_kwargs`
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
SCREAMING_SNAKE_CASE__ : int = pipe('''This is a test''' )
self.assertEqual(
_lowercase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
SCREAMING_SNAKE_CASE__ : int = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe('''This is a test''' )
self.assertEqual(
_lowercase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
SCREAMING_SNAKE_CASE__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
SCREAMING_SNAKE_CASE__ : List[Any] = pipe('''This is a test''' )
self.assertEqual(
_lowercase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def lowercase__ ( self : Optional[int] ):
import torch
SCREAMING_SNAKE_CASE__ : Tuple = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def lowercase__ ( self : Any ):
import torch
SCREAMING_SNAKE_CASE__ : List[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_lowercase , top_p=0.5 )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''Hello world'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger('''transformers.generation.tf_utils''' )
else:
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger('''transformers.generation.utils''' )
        SCREAMING_SNAKE_CASE__ : Tuple = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_lowercase ) as cl:
SCREAMING_SNAKE_CASE__ : Dict = text_generator(_lowercase , max_length=10 , max_new_tokens=1 )
self.assertIn(_lowercase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_lowercase ) as cl:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = text_generator(_lowercase , max_new_tokens=1 )
self.assertNotIn(_lowercase , cl.out )
with CaptureLogger(_lowercase ) as cl:
SCREAMING_SNAKE_CASE__ : Tuple = text_generator(_lowercase , max_length=10 )
self.assertNotIn(_lowercase , cl.out )
| 35 |
import random
def rabin_miller( num: int ) -> bool:
    '''simple docstring'''
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num( num: int ) -> bool:
    '''simple docstring'''
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num )
def generate_large_prime( keysize: int = 1_0_2_4 ) -> int:
    '''simple docstring'''
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
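    # Hedged sanity check (a small keysize keeps plain trial division cheap):
    small_prime = generate_large_prime(1_6 )
    assert all(small_prime % d != 0 for d in range(2 , int(small_prime**0.5 ) + 1 ) )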
| 35 | 1 |
def print_max_activities( start , finish ) -> None:
    '''simple docstring'''
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i , end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=''',''' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
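# A hedged variant of the same greedy rule that returns the selected indices
# instead of printing them (assumes activities are pre-sorted by finish time,
# as in the example input above):
def max_activities( start , finish ):
    selected = [0]
    i = 0
    for j in range(1 , len(finish ) ):
        if start[j] >= finish[i]:
            selected.append(j )
            i = j
    return selected
assert max_activities([1, 3, 0, 5, 8, 5] , [2, 4, 6, 7, 9, 9] ) == [0, 1, 3, 4]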
| 35 |
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function( z ):
    '''simple docstring'''
    return 1 / (1 + np.exp(-z ))
def cost_function( h , y ):
    '''simple docstring'''
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood( x , y , weights ):
    '''simple docstring'''
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg( alpha , x , y , max_iterations=7_0_0_0_0 ):
    '''simple docstring'''
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 1_0_0 == 0:
            print(f"""loss: {j} \t""" )  # printing the loss after every 100 iterations
    return theta
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing the theta i.e our weights vector
def predict_prob( x ):
    '''simple docstring'''
    return sigmoid_function(
        np.dot(x , theta ) )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
(x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
(x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
(xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
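# Hedged follow-up: training accuracy of the fitted model, reusing `x`, `y`,
# and `predict_prob` from the script above (0.5 as the usual decision threshold):
predictions = (predict_prob(x) >= 0.5).astype(int)
print('''train accuracy:''' , (predictions == y).mean() )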
| 35 | 1 |
MOD_ADLER = 6_55_21
def adler32( plain_text: str ) -> int:
    '''simple docstring'''
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 1_6) | a
| 35 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory( _ ):
    '''simple docstring'''
    return EnvironmentCommand()
class EnvironmentCommand( BaseDiffusersCLICommand ):
    @staticmethod
    def register_subcommand( parser: ArgumentParser ):
        download_parser = parser.add_parser('''env''' )
        download_parser.set_defaults(func=info_command_factory )
    def run( self ):
        hub_version = huggingface_hub.__version__
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = '''not installed'''
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = '''not installed'''
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = '''not installed'''
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(info ) )
return info
    @staticmethod
    def format_dict( d: dict ):
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 | 1 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
a_ :List[Any] = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
a_ :Union[str, Any] = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
a_ :Optional[int] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def a ( A__ ) -> Any:
'''simple docstring'''
def remove_articles(A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(A__ , ''' ''' , A__ )
def white_space_fix(A__ ):
return " ".join(text.split() )
def remove_punc(A__ ):
SCREAMING_SNAKE_CASE__ : Dict = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(A__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(A__ ) ) ) )
def a ( A__ , A__ ) -> Tuple:
'''simple docstring'''
return int(normalize_answer(A__ ) == normalize_answer(A__ ) )
def a ( A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = [any(compute_exact(A__ , A__ ) for ref in refs ) for pred, refs in zip(A__ , A__ )]
return (sum(A__ ) / len(A__ )) * 1_0_0
def a ( A__ , A__ , A__ , A__ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = [rgram for rgrams in rgramslist for rgram in rgrams]
SCREAMING_SNAKE_CASE__ : Optional[int] = Counter(A__ )
SCREAMING_SNAKE_CASE__ : str = Counter(A__ )
SCREAMING_SNAKE_CASE__ : Any = Counter()
for sgram, scount in sgramcounter.items():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scount * numref
SCREAMING_SNAKE_CASE__ : Tuple = Counter(A__ )
SCREAMING_SNAKE_CASE__ : List[str] = Counter()
for cgram, ccount in cgramcounter.items():
SCREAMING_SNAKE_CASE__ : Tuple = ccount * numref
# KEEP
SCREAMING_SNAKE_CASE__ : int = sgramcounter_rep & cgramcounter_rep
SCREAMING_SNAKE_CASE__ : Any = keepgramcounter_rep & rgramcounter
SCREAMING_SNAKE_CASE__ : Tuple = sgramcounter_rep & rgramcounter
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : int = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : Dict = 1
if len(A__ ) > 0:
SCREAMING_SNAKE_CASE__ : Optional[int] = keeptmpscorea / len(A__ )
if len(A__ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
SCREAMING_SNAKE_CASE__ : Dict = keeptmpscorea / sum(keepgramcounterall_rep.values() )
SCREAMING_SNAKE_CASE__ : Dict = 0
if keepscore_precision > 0 or keepscore_recall > 0:
SCREAMING_SNAKE_CASE__ : str = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
SCREAMING_SNAKE_CASE__ : int = sgramcounter_rep - cgramcounter_rep
SCREAMING_SNAKE_CASE__ : Any = delgramcounter_rep - rgramcounter
SCREAMING_SNAKE_CASE__ : Optional[int] = sgramcounter_rep - rgramcounter
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : Tuple = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
SCREAMING_SNAKE_CASE__ : List[Any] = 1
if len(A__ ) > 0:
SCREAMING_SNAKE_CASE__ : List[str] = deltmpscorea / len(A__ )
# ADDITION
SCREAMING_SNAKE_CASE__ : List[Any] = set(A__ ) - set(A__ )
SCREAMING_SNAKE_CASE__ : str = set(A__ ) & set(A__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = set(A__ ) - set(A__ )
SCREAMING_SNAKE_CASE__ : str = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
SCREAMING_SNAKE_CASE__ : List[Any] = 1
if len(A__ ) > 0:
SCREAMING_SNAKE_CASE__ : str = addtmpscore / len(A__ )
if len(A__ ) > 0:
SCREAMING_SNAKE_CASE__ : int = addtmpscore / len(A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
if addscore_precision > 0 or addscore_recall > 0:
SCREAMING_SNAKE_CASE__ : List[str] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def a ( A__ , A__ , A__ ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = len(A__ )
SCREAMING_SNAKE_CASE__ : Dict = ssent.split(''' ''' )
SCREAMING_SNAKE_CASE__ : Dict = csent.split(''' ''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Dict = []
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : str = []
for rsent in rsents:
SCREAMING_SNAKE_CASE__ : Dict = rsent.split(''' ''' )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
ragramslist.append(A__ )
for i in range(0 , len(A__ ) - 1 ):
if i < len(A__ ) - 1:
SCREAMING_SNAKE_CASE__ : Tuple = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(A__ )
if i < len(A__ ) - 2:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(A__ )
if i < len(A__ ) - 3:
SCREAMING_SNAKE_CASE__ : List[Any] = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(A__ )
ragramslist.append(A__ )
ragramslist.append(A__ )
ragramslist.append(A__ )
for i in range(0 , len(A__ ) - 1 ):
if i < len(A__ ) - 1:
SCREAMING_SNAKE_CASE__ : Optional[Any] = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(A__ )
if i < len(A__ ) - 2:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(A__ )
if i < len(A__ ) - 3:
SCREAMING_SNAKE_CASE__ : List[Any] = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(A__ )
for i in range(0 , len(A__ ) - 1 ):
if i < len(A__ ) - 1:
SCREAMING_SNAKE_CASE__ : List[str] = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(A__ )
if i < len(A__ ) - 2:
SCREAMING_SNAKE_CASE__ : List[Any] = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(A__ )
if i < len(A__ ) - 3:
SCREAMING_SNAKE_CASE__ : Dict = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(A__ )
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : Tuple = SARIngram(A__ , A__ , A__ , A__ )
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : Union[str, Any] = SARIngram(A__ , A__ , A__ , A__ )
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : str = SARIngram(A__ , A__ , A__ , A__ )
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : Optional[int] = SARIngram(A__ , A__ , A__ , A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
SCREAMING_SNAKE_CASE__ : Optional[int] = sum([delascore, delascore, delascore, delascore] ) / 4
SCREAMING_SNAKE_CASE__ : List[str] = sum([addascore, addascore, addascore, addascore] ) / 4
SCREAMING_SNAKE_CASE__ : int = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def a ( A__ , A__ = True , A__ = "13a" , A__ = True ) -> Optional[int]:
'''simple docstring'''
if lowercase:
SCREAMING_SNAKE_CASE__ : List[str] = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
SCREAMING_SNAKE_CASE__ : str = sacrebleu.metrics.bleu._get_tokenizer(A__ )()(A__ )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = sacrebleu.TOKENIZERS[tokenizer]()(A__ )
elif tokenizer == "moses":
SCREAMING_SNAKE_CASE__ : str = sacremoses.MosesTokenizer().tokenize(A__ , return_str=A__ , escape=A__ )
elif tokenizer == "penn":
SCREAMING_SNAKE_CASE__ : int = sacremoses.MosesTokenizer().penn_tokenize(A__ , return_str=A__ )
else:
SCREAMING_SNAKE_CASE__ : Tuple = sentence
if not return_str:
SCREAMING_SNAKE_CASE__ : List[str] = normalized_sent.split()
return normalized_sent
def a ( A__ , A__ , A__ ) -> int:
'''simple docstring'''
if not (len(A__ ) == len(A__ ) == len(A__ )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
SCREAMING_SNAKE_CASE__ : str = 0
for src, pred, refs in zip(A__ , A__ , A__ ):
sari_score += SARIsent(normalize(A__ ) , normalize(A__ ) , [normalize(A__ ) for sent in refs] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sari_score / len(A__ )
return 1_0_0 * sari_score
def a ( A__ , A__ , A__="exp" , A__=None , A__=False , A__=False , A__=False , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = len(references[0] )
if any(len(A__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
SCREAMING_SNAKE_CASE__ : Tuple = [[refs[i] for refs in references] for i in range(A__ )]
SCREAMING_SNAKE_CASE__ : Optional[int] = sacrebleu.corpus_bleu(
A__ , A__ , smooth_method=A__ , smooth_value=A__ , force=A__ , lowercase=A__ , use_effective_order=A__ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def lowercase__ ( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : Dict , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
result.update({'''sari''': compute_sari(sources=_lowercase , predictions=_lowercase , references=_lowercase )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=_lowercase , references=_lowercase )} )
result.update({'''exact''': compute_em(predictions=_lowercase , references=_lowercase )} )
return result
| 35 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch( tf_checkpoint_path , rembert_config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = RemBertConfig.from_json_file(rembert_config_file )
    print('''Building PyTorch model from configuration: {}'''.format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print('''Save PyTorch model to {}'''.format(pytorch_dump_path ) )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
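    # Hedged example invocation (argument names from the parser above; the
    # script name and all paths are placeholders):
    #   python convert_rembert_tf_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path /path/to/rembert/model.ckpt \
    #     --rembert_config_file /path/to/rembert_config.json \
    #     --pytorch_dump_path /path/to/pytorch_model.bin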
| 35 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
SCREAMING_SNAKE_CASE__ : Any = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
SCREAMING_SNAKE_CASE__ : Optional[int] = tf_top_k_top_p_filtering(_lowercase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output[output != -float('''inf''' )]
SCREAMING_SNAKE_CASE__ : Dict = tf.cast(
tf.where(tf.not_equal(_lowercase , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_lowercase , _lowercase , rtol=1E-12 )
tf.debugging.assert_equal(_lowercase , _lowercase )
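        # Reading the assertions above: with top_k=10, top_p=0.6 and min_tokens_to_keep=4,
        # the filter keeps, per row, the top-k candidates whose cumulative probability stays
        # within top_p (never fewer than min_tokens_to_keep) and sets every other logit to
        # -inf; the expected index/value tensors encode exactly those survivors.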
@require_tf
class lowercase ( unittest.TestCase , _UpperCAmelCase ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowerCamelCase : Optional[int] = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowercase__ ( self : Any ):
# TF-only test: tf.saved_model export
SCREAMING_SNAKE_CASE__ : Any = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE__ : List[Any] = 2
SCREAMING_SNAKE_CASE__ : Dict = 2
class lowercase ( tf.Module ):
def __init__( self : List[str] , _lowercase : List[Any] ):
super(_lowercase , self ).__init__()
SCREAMING_SNAKE_CASE__ : Tuple = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_lowercase , )
def lowercase__ ( self : List[str] , _lowercase : Tuple , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : str = self.model.generate(
input_ids=_lowercase , attention_mask=_lowercase , max_new_tokens=_lowercase , return_dict_in_generate=_lowercase , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE__ : Any = [[2, 0], [1_02, 1_03]]
SCREAMING_SNAKE_CASE__ : Any = [[1, 0], [1, 1]]
SCREAMING_SNAKE_CASE__ : List[Any] = DummyModel(model=_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_lowercase , _lowercase , signatures={'''serving_default''': dummy_model.serving} )
SCREAMING_SNAKE_CASE__ : str = tf.saved_model.load(_lowercase ).signatures['''serving_default''']
for batch_size in range(1 , len(_lowercase ) + 1 ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
'''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
}
SCREAMING_SNAKE_CASE__ : Optional[int] = serving_func(**_lowercase )['''sequences''']
SCREAMING_SNAKE_CASE__ : Optional[int] = test_model.generate(**_lowercase , max_new_tokens=_lowercase )
tf.debugging.assert_equal(_lowercase , _lowercase )
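    # Note: the test above freezes the sequence length in the serving signature and varies
    # the batch size; the companion test below does the opposite (fixed batch size, variable
    # input length), so together they cover both partially static export shapes.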
@slow
def lowercase__ ( self : Optional[Any] ):
# TF-only test: tf.saved_model export
SCREAMING_SNAKE_CASE__ : List[str] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : Tuple = 2
class lowercase ( tf.Module ):
def __init__( self : Union[str, Any] , _lowercase : int ):
super(_lowercase , self ).__init__()
SCREAMING_SNAKE_CASE__ : List[str] = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
) , jit_compile=_lowercase , )
def lowercase__ ( self : Dict , _lowercase : Tuple , _lowercase : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Any = self.model.generate(
input_ids=_lowercase , attention_mask=_lowercase , max_new_tokens=_lowercase , return_dict_in_generate=_lowercase , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[2], [1_02, 1_03]]
SCREAMING_SNAKE_CASE__ : int = [[1], [1, 1]]
SCREAMING_SNAKE_CASE__ : Any = DummyModel(model=_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_lowercase , _lowercase , signatures={'''serving_default''': dummy_model.serving} )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.saved_model.load(_lowercase ).signatures['''serving_default''']
for input_row in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : str = {
'''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
'''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
}
SCREAMING_SNAKE_CASE__ : List[str] = serving_func(**_lowercase )['''sequences''']
SCREAMING_SNAKE_CASE__ : Optional[int] = test_model.generate(**_lowercase , max_new_tokens=_lowercase )
tf.debugging.assert_equal(_lowercase , _lowercase )
@slow
@require_tensorflow_text
def lowercase__ ( self : Optional[int] ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=_lowercase )
class lowercase ( tf.keras.layers.Layer ):
def __init__( self : Any ):
super().__init__()
SCREAMING_SNAKE_CASE__ : int = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_lowercase , '''spiece.model''' ) , '''rb''' ).read() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
def lowercase__ ( self : List[str] , _lowercase : Optional[Any] , *_lowercase : Tuple , **_lowercase : str ):
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.tokenize(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = text.pad_model_inputs(
_lowercase , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase )
return self.tokenizer.detokenize(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = CompleteSentenceTransformer()
SCREAMING_SNAKE_CASE__ : str = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
SCREAMING_SNAKE_CASE__ : Dict = complete_model(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.keras.Model(_lowercase , _lowercase )
keras_model.save(_lowercase )
def lowercase__ ( self : str ):
# Has PT equivalent: this test relies on random sampling
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''do_sample''': True,
'''num_beams''': 1,
'''top_p''': 0.7,
'''top_k''': 10,
'''temperature''': 0.7,
}
SCREAMING_SNAKE_CASE__ : Any = 14
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''Hello, my dog is cute and'''
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_lowercase , return_tensors='''tf''' )
SCREAMING_SNAKE_CASE__ : List[str] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
SCREAMING_SNAKE_CASE__ : List[str] = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = model.generate(**_lowercase , eos_token_id=_lowercase , **_lowercase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
SCREAMING_SNAKE_CASE__ : List[str] = [6_38, 1_98]
with tf.device(''':/CPU:0''' ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.generate(**_lowercase , eos_token_id=_lowercase , **_lowercase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowercase__ ( self : Optional[int] ):
# Has PT equivalent: ample use of framework-specific code
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
SCREAMING_SNAKE_CASE__ : List[Any] = '''Hugging Face is a technology company based in New York and Paris.'''
SCREAMING_SNAKE_CASE__ : Optional[int] = bart_tokenizer(_lowercase , return_tensors='''tf''' ).input_ids
SCREAMING_SNAKE_CASE__ : str = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
SCREAMING_SNAKE_CASE__ : str = bart_model.generate(_lowercase ).numpy()
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Optional[Any] , _lowercase : Optional[int] , _lowercase : List[str]=None , **_lowercase : Union[str, Any] ):
return super().call(_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
SCREAMING_SNAKE_CASE__ : List[Any] = bart_model.generate(_lowercase , foo='''bar''' ).numpy()
self.assertTrue(np.array_equal(_lowercase , _lowercase ) )
class lowercase ( bart_model.model.encoder.__class__ ):
def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] , **_lowercase : List[str] ):
return super().call(_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = FakeEncoder(bart_model.config , bart_model.model.shared )
SCREAMING_SNAKE_CASE__ : List[Any] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
SCREAMING_SNAKE_CASE__ : Dict = bart_model.generate(_lowercase ).numpy()
with self.assertRaises(_lowercase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_lowercase , foo='''bar''' )
| 35 |
from sklearn.metrics import recall_score
import datasets
a_ :int = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
a_ :Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to \'warn\'.\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
a_ :Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def lowercase__ ( self : Tuple , predictions : Optional[Any] , references : Optional[Any] , labels : Optional[int]=None , pos_label : Tuple=1 , average : List[Any]="binary" , sample_weight : Any=None , zero_division : Optional[int]="warn" , ):
        SCREAMING_SNAKE_CASE__ : Optional[Any] = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 35 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Union[str, Any] = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
a_ :List[str] = {
'google/realm-cc-news-pretrained-embedder': 5_12,
'google/realm-cc-news-pretrained-encoder': 5_12,
'google/realm-cc-news-pretrained-scorer': 5_12,
'google/realm-cc-news-pretrained-openqa': 5_12,
'google/realm-orqa-nq-openqa': 5_12,
'google/realm-orqa-nq-reader': 5_12,
'google/realm-orqa-wq-openqa': 5_12,
'google/realm-orqa-wq-reader': 5_12,
}
a_ :Tuple = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Any = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Tuple = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : List[str] = RealmTokenizer
def __init__( self : Any , _lowercase : Any=None , _lowercase : Any=None , _lowercase : List[Any]=True , _lowercase : Any="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[Any]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : str="[MASK]" , _lowercase : Optional[int]=True , _lowercase : Any=None , **_lowercase : List[str] , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE__ : Tuple = getattr(_lowercase , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = do_lower_case
SCREAMING_SNAKE_CASE__ : Any = strip_accents
SCREAMING_SNAKE_CASE__ : Dict = tokenize_chinese_chars
SCREAMING_SNAKE_CASE__ : str = normalizer_class(**_lowercase )
SCREAMING_SNAKE_CASE__ : Any = do_lower_case
def lowercase__ ( self : Dict , _lowercase : Tuple , **_lowercase : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PaddingStrategy.MAX_LENGTH
SCREAMING_SNAKE_CASE__ : Any = text
SCREAMING_SNAKE_CASE__ : List[str] = kwargs.pop('''text_pair''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.pop('''return_tensors''' , _lowercase )
SCREAMING_SNAKE_CASE__ : str = {
'''input_ids''': [],
'''attention_mask''': [],
'''token_type_ids''': [],
}
for idx, candidate_text in enumerate(_lowercase ):
if batch_text_pair is not None:
SCREAMING_SNAKE_CASE__ : Dict = batch_text_pair[idx]
else:
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : Any = super().__call__(_lowercase , _lowercase , return_tensors=_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = encoded_candidates.get('''input_ids''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoded_candidates.get('''attention_mask''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = encoded_candidates.get('''token_type_ids''' )
if encoded_input_ids is not None:
output_data["input_ids"].append(_lowercase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_lowercase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = {key: item for key, item in output_data.items() if len(_lowercase ) != 0}
return BatchEncoding(_lowercase , tensor_type=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[str]=None ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE__ : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Optional[int] , _lowercase : str , _lowercase : Optional[str] = None ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
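# In the upstream library the candidate-encoding method above is exposed as
# `batch_encode_candidates`; a sketch of its intended use (method and model names
# assumed from upstream, not shown in this excerpt):
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#   out = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
#   # every tensor in `out` has shape (batch_size, num_candidates, max_length)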
| 35 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a_ :List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase :
lowerCamelCase : str
lowerCamelCase : List[str]
lowerCamelCase : Optional[List[str]]
@dataclass
class lowercase :
lowerCamelCase : List[int]
lowerCamelCase : List[int]
lowerCamelCase : Optional[List[int]] = None
lowerCamelCase : Optional[List[int]] = None
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = '''train'''
lowerCamelCase : Tuple = '''dev'''
lowerCamelCase : Any = '''test'''
class lowercase :
@staticmethod
def lowercase__ ( _lowercase : Any , _lowercase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : str ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : int=False , _lowercase : Optional[Any]="[CLS]" , _lowercase : Tuple=1 , _lowercase : Optional[Any]="[SEP]" , _lowercase : Tuple=False , _lowercase : Optional[Any]=False , _lowercase : List[Any]=0 , _lowercase : Optional[int]=0 , _lowercase : Optional[Any]=-1_00 , _lowercase : Tuple=0 , _lowercase : Union[str, Any]=True , ):
SCREAMING_SNAKE_CASE__ : Tuple = {label: i for i, label in enumerate(_lowercase )}
SCREAMING_SNAKE_CASE__ : Dict = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , _lowercase , len(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(_lowercase )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when tokenize is called with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
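            # Illustration of the alignment rule (hypothetical values): if the word
            # "jacksonville" tokenizes to ["jack", "##son", "##ville"] and its label id
            # is 3 with pad_token_label_id = -100, this emits label_ids [3, -100, -100],
            # so only the first subword of each word contributes to the loss.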
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ : Tuple = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ : Tuple = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ : str = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ : List[str] = max_seq_length - len(_lowercase )
if pad_on_left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(_lowercase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(_lowercase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(_lowercase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(_lowercase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : List[Any] = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Optional[int]=False , _lowercase : Split = Split.train , ):
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
SCREAMING_SNAKE_CASE__ : Any = torch.load(_lowercase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
SCREAMING_SNAKE_CASE__ : str = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Any = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , _lowercase : List[str] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowercase :
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = -100
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : List[str]=False , _lowercase : Split = Split.train , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : List[str] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Optional[Any] , _lowercase : Union[str, Any] ):
return self.features[i]
| 35 | 1 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm( number ) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError('''the value of input must not be negative''' )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
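# Example trace for 13 (0b1101) in the function above: 0b1101 -> 0b1100 -> 0b1000 -> 0b0000,
# i.e. three iterations of clearing the lowest set bit, matching the three set bits.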
def get_set_bits_count_using_modulo_operator( number ) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError('''the value of input must not be negative''' )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark( ) -> None:
    '''simple docstring'''
    def do_benchmark(number ) -> None:
        setup = '''import __main__ as z'''
        print(f"""Benchmark when {number = }:""" )
        print(f"""{get_set_bits_count_using_modulo_operator(number ) = }""" )
        timing = timeit(f'''z.get_set_bits_count_using_modulo_operator({number})''' , setup=setup )
        print(f"""timeit() runs in {timing} seconds""" )
        print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }""" )
        timing = timeit(
            f'''z.get_set_bits_count_using_brian_kernighans_algorithm({number})''' , setup=setup , )
        print(f"""timeit() runs in {timing} seconds""" )
    for number in (2_5, 3_7, 5_8, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 35 |
import os
def a ( A__ = "matrix.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(A__ ) , A__ ) ) as in_file:
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_file.read()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[int(A__ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE__ : Dict = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE__ : Any = len(grid[0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[0 for i in range(A__ )] for j in range(A__ )]
SCREAMING_SNAKE_CASE__ : Tuple = grid[0][0]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[0][i] + dp[0][i - 1]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[i][0] + dp[i - 1][0]
for i in range(1 , A__ ):
for j in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
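# Tiny worked example of the same recurrence, without file I/O: for the grid
#   [[1, 3],
#    [2, 4]]
# the two monotone paths cost 1+3+4 = 8 and 1+2+4 = 7, so the DP returns 7.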
if __name__ == "__main__":
print(F'''{solution() = }''')
| 35 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[Any] = (CMStochasticIterativeScheduler,)
lowerCamelCase : str = 10
def lowercase__ ( self : Optional[Any] , **_lowercase : int ):
SCREAMING_SNAKE_CASE__ : str = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**_lowercase )
return config
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = 10
SCREAMING_SNAKE_CASE__ : Dict = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Any = self.scheduler_classes[0](**_lowercase )
scheduler.set_timesteps(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = scheduler.timesteps[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler.timesteps[1]
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dummy_sample
SCREAMING_SNAKE_CASE__ : str = 0.1 * sample
SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
SCREAMING_SNAKE_CASE__ : List[str] = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase__ ( self : str ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_lowercase )
def lowercase__ ( self : Tuple ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_lowercase )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Dict = scheduler_class(**_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = 1
scheduler.set_timesteps(_lowercase )
SCREAMING_SNAKE_CASE__ : int = scheduler.timesteps
SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_model()
SCREAMING_SNAKE_CASE__ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_lowercase ):
# 1. scale model input
SCREAMING_SNAKE_CASE__ : Any = scheduler.scale_model_input(_lowercase , _lowercase )
# 2. predict noise residual
SCREAMING_SNAKE_CASE__ : Dict = model(_lowercase , _lowercase )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE__ : Any = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
SCREAMING_SNAKE_CASE__ : List[Any] = pred_prev_sample
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.sum(torch.abs(_lowercase ) )
SCREAMING_SNAKE_CASE__ : int = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 192.7614 ) < 1E-2
assert abs(result_mean.item() - 0.2510 ) < 1E-3
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : List[Any] = scheduler_class(**_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [1_06, 0]
scheduler.set_timesteps(timesteps=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.timesteps
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = self.dummy_model()
SCREAMING_SNAKE_CASE__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
SCREAMING_SNAKE_CASE__ : int = scheduler.scale_model_input(_lowercase , _lowercase )
# 2. predict noise residual
SCREAMING_SNAKE_CASE__ : int = model(_lowercase , _lowercase )
# 3. predict previous sample x_t-1
SCREAMING_SNAKE_CASE__ : Tuple = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
SCREAMING_SNAKE_CASE__ : Tuple = pred_prev_sample
SCREAMING_SNAKE_CASE__ : int = torch.sum(torch.abs(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Dict = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 347.6357 ) < 1E-2
assert abs(result_mean.item() - 0.4527 ) < 1E-3
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : str = scheduler_class(**_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = [39, 30, 12, 15, 0]
with self.assertRaises(_lowercase , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=_lowercase )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : str = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Any = scheduler_class(**_lowercase )
SCREAMING_SNAKE_CASE__ : int = [39, 30, 12, 1, 0]
SCREAMING_SNAKE_CASE__ : Optional[int] = len(_lowercase )
with self.assertRaises(_lowercase , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=_lowercase , timesteps=_lowercase )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _lowercase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=_lowercase )
| 35 |
from math import factorial
def solution ( A__ = 2_0 ) -> int:
    '''simple docstring'''
    n = 2 * A__  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
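# Sanity check: a 2x2 grid has C(4, 2) = 6 lattice routes, so solution(2) == 6;
# the default solution(20) evaluates to C(40, 20) == 137846528820.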
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
a_ :str = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 35 | 1 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def hashimage ( A__ ) -> str:
    '''simple docstring'''
    m = hashlib.md5(A__.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : List[str] = DepthEstimationPipeline(model=_lowercase , image_processor=_lowercase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _lowercase )
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , _lowercase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = '''Intel/dpt-large'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''depth-estimation''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
SCREAMING_SNAKE_CASE__ : List[str] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
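# Minimal pipeline usage, mirroring the slow test above:
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   outputs["depth"]            # PIL.Image.Image rendering of the depth map
#   outputs["predicted_depth"]  # torch.Tensor of raw per-pixel predictions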
| 35 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ :Union[str, Any] = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Union[str, Any] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :int = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a_ :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
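    # Note: _LazyModule defers the framework-gated imports declared in _import_structure;
    # e.g. `from transformers.models.roformer import RoFormerModel` only executes the
    # torch branch above on first attribute access, keeping plain imports of this
    # package cheap when torch/TF/Flax are not installed or not needed.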
| 35 |
def a ( A__ ) -> int:
    '''simple docstring'''
    if A__ < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(A__ , float ):
        raise TypeError('''Input value must be an \'int\' type''' )
    return bin(A__ ).count('''1''' )
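# Example: bin(25) == '0b11001', which contains three '1' characters, so a(25) == 3.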
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
lowerCamelCase : Any = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
lowerCamelCase : int = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def lowercase__ ( self : str , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : List[Any] = AudioClassificationPipeline(model=_lowercase , feature_extractor=_lowercase )
# test with a raw waveform
SCREAMING_SNAKE_CASE__ : List[Any] = np.zeros((3_40_00,) )
SCREAMING_SNAKE_CASE__ : Dict = np.zeros((1_40_00,) )
return audio_classifier, [audioa, audio]
def lowercase__ ( self : List[str] , _lowercase : Optional[int] , _lowercase : Dict ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = examples
SCREAMING_SNAKE_CASE__ : Dict = audio_classifier(_lowercase )
# by default a model is initialized with num_labels=2
self.assertEqual(
_lowercase , [
{'''score''': ANY(_lowercase ), '''label''': ANY(_lowercase )},
{'''score''': ANY(_lowercase ), '''label''': ANY(_lowercase )},
] , )
SCREAMING_SNAKE_CASE__ : str = audio_classifier(_lowercase , top_k=1 )
self.assertEqual(
_lowercase , [
{'''score''': ANY(_lowercase ), '''label''': ANY(_lowercase )},
] , )
self.run_torchaudio(_lowercase )
@require_torchaudio
def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] ):
import datasets
# test with a local file
SCREAMING_SNAKE_CASE__ : str = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
SCREAMING_SNAKE_CASE__ : int = dataset[0]['''audio''']['''array''']
SCREAMING_SNAKE_CASE__ : str = audio_classifier(_lowercase )
self.assertEqual(
_lowercase , [
{'''score''': ANY(_lowercase ), '''label''': ANY(_lowercase )},
{'''score''': ANY(_lowercase ), '''label''': ANY(_lowercase )},
] , )
@require_torch
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : int = '''anton-l/wav2vec2-random-tiny-classifier'''
SCREAMING_SNAKE_CASE__ : str = pipeline('''audio-classification''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = np.ones((80_00,) )
SCREAMING_SNAKE_CASE__ : List[str] = audio_classifier(_lowercase , top_k=4 )
SCREAMING_SNAKE_CASE__ : Dict = [
{'''score''': 0.0842, '''label''': '''no'''},
{'''score''': 0.0838, '''label''': '''up'''},
{'''score''': 0.0837, '''label''': '''go'''},
{'''score''': 0.0834, '''label''': '''right'''},
]
SCREAMING_SNAKE_CASE__ : Optional[int] = [
{'''score''': 0.0845, '''label''': '''stop'''},
{'''score''': 0.0844, '''label''': '''on'''},
{'''score''': 0.0841, '''label''': '''right'''},
{'''score''': 0.0834, '''label''': '''left'''},
]
self.assertIn(nested_simplify(_lowercase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
SCREAMING_SNAKE_CASE__ : Tuple = {'''array''': np.ones((80_00,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
SCREAMING_SNAKE_CASE__ : List[str] = audio_classifier(_lowercase , top_k=4 )
self.assertIn(nested_simplify(_lowercase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def lowercase__ ( self : List[str] ):
import datasets
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''superb/wav2vec2-base-superb-ks'''
SCREAMING_SNAKE_CASE__ : Any = pipeline('''audio-classification''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array(dataset[3]['''speech'''] , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ : int = audio_classifier(_lowercase , top_k=4 )
self.assertEqual(
nested_simplify(_lowercase , decimals=3 ) , [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def lowercase__ ( self : Union[str, Any] ):
pass
| 35 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def a ( A__ , A__ , A__ , A__ ) -> Tuple[int, int]:
'''simple docstring'''
def constraint_to_multiple_of(A__ , A__ , A__=0 , A__=None ):
SCREAMING_SNAKE_CASE__ : Optional[int] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE__ : Any = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE__ : Any = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (output_size, output_size) if isinstance(A__ , A__ ) else output_size
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = get_image_size(A__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE__ : List[str] = output_height / input_height
SCREAMING_SNAKE_CASE__ : Dict = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE__ : List[str] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE__ : Optional[Any] = scale_height
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_height * input_height , multiple=A__ )
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_width * input_width , multiple=A__ )
return (new_height, new_width)
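# Illustrative walk-through of the helper above (hypothetical numbers): for a
# 480x640 (height x width) input, output_size=(384, 384), keep_aspect_ratio=True
# and multiple=32, the scales are 384/480 = 0.8 and 384/640 = 0.6. Fitting the
# height sets both scales to 0.8, and rounding to a multiple of 32 returns
# (new_height, new_width) == (384, 512).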
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
def __init__( self : List[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[str] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(
_lowercase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Tuple , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Any = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Tuple = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Any = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : List[Tuple] = None ):
SCREAMING_SNAKE_CASE__ : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : Tuple = []
for idx in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
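# Minimal usage sketch (hypothetical names; this assumes the class above is
# registered as the DPT image processor in transformers, which is not shown here):
# processor = DPTImageProcessor(size={'height': 3_84, 'width': 3_84})
# inputs = processor(images=image, return_tensors='pt')
# seg_maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(4_80, 6_40)])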
| 35 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Union[str, Any] = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements that should be identified in the segmentation mask. The tool returns the mask.'''
)
lowerCamelCase : int = '''CIDAS/clipseg-rd64-refined'''
lowerCamelCase : Optional[int] = '''image_segmenter'''
lowerCamelCase : int = CLIPSegForImageSegmentation
lowerCamelCase : Tuple = ['''image''', '''text''']
lowerCamelCase : List[str] = ['''image''']
def __init__( self : Optional[Any] , *_lowercase : List[Any] , **_lowercase : List[str] ):
requires_backends(self , ['''vision'''] )
super().__init__(*_lowercase , **_lowercase )
def lowercase__ ( self : str , _lowercase : "Image" , _lowercase : str ):
return self.pre_processor(text=[label] , images=[image] , padding=_lowercase , return_tensors='''pt''' )
def lowercase__ ( self : int , _lowercase : Dict ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model(**_lowercase ).logits
return logits
def lowercase__ ( self : Tuple , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.cpu().detach().numpy()
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : Tuple = 1
return Image.fromarray((array * 2_55).astype(np.uinta ) )
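# Rough usage sketch (hypothetical, assuming the agents framework chains the
# encode/forward/decode methods above together):
# tool = ImageSegmenterTool()
# mask = tool(image=pil_image, label='a cat') # black/white PIL mask for the label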
| 35 |
from __future__ import annotations
from typing import Any
class lowercase :
def __init__( self : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : List[str] = num_of_nodes
SCREAMING_SNAKE_CASE__ : list[list[int]] = []
SCREAMING_SNAKE_CASE__ : dict[int, int] = {}
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Optional[int] , _lowercase : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[Any] , _lowercase : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
SCREAMING_SNAKE_CASE__ : Any = self.find_component(_lowercase )
def lowercase__ ( self : int , _lowercase : list[int] , _lowercase : int , _lowercase : int ):
if component_size[u_node] <= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : Dict = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowercase )
elif component_size[u_node] >= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.find_component(_lowercase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE__ : List[str] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = edge
SCREAMING_SNAKE_CASE__ : Tuple = self.m_component[u]
SCREAMING_SNAKE_CASE__ : List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE__ : int = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = edge
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[u]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowercase , _lowercase , _lowercase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
SCREAMING_SNAKE_CASE__ : List[Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
import baseaa
def a ( A__ ) -> bytes:
'''simple docstring'''
return baseaa.baaencode(string.encode('''utf-8''' ) )
def a ( A__ ) -> str:
'''simple docstring'''
return baseaa.baadecode(A__ ).decode('''utf-8''' )
if __name__ == "__main__":
a_ :Tuple = 'Hello World!'
a_ :List[str] = baseaa_encode(test)
print(encoded)
a_ :Optional[int] = baseaa_decode(encoded)
print(decoded)
| 35 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ :Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 1 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
a_ :Any = TypeVar('T')
class lowercase ( Generic[T] ):
def __init__( self : Union[str, Any] , _lowercase : bool = True ):
SCREAMING_SNAKE_CASE__ : dict[T, list[T]] = {} # dictionary of lists
SCREAMING_SNAKE_CASE__ : int = directed
def lowercase__ ( self : Optional[Any] , _lowercase : T , _lowercase : T ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowercase )
self.adj_list[destination_vertex].append(_lowercase )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
SCREAMING_SNAKE_CASE__ : List[str] = [destination_vertex]
SCREAMING_SNAKE_CASE__ : str = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowercase )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [destination_vertex]
SCREAMING_SNAKE_CASE__ : Any = []
return self
def __repr__( self : Tuple ):
return pformat(self.adj_list )
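# Illustrative trace (hypothetical, assuming the method above is add_edge): on an
# undirected graph, add_edge(1, 2) followed by add_edge(2, 3) leaves
# adj_list == {1: [2], 2: [1, 3], 3: [2]}.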
| 35 |
def a ( A__ ) -> str:
'''simple docstring'''
return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] )
def a ( A__ ) -> bytes:
'''simple docstring'''
if (len(A__ ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(A__ ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(A__ ) , 2 ) )
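# Worked example: b"Hello" encodes to "48656C6C6F" (H=0x48, e=0x65, l=0x6C,
# o=0x6F), and decoding reads the string two hex digits at a time to rebuild the
# original five bytes.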
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
a_ :Union[str, Any] = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
a_ :int = 'hopper-medium-v2'
a_ :Any = gym.make(env_name)
a_ :List[str] = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
a_ :Optional[Any] = env.reset()
a_ :Optional[int] = 0
a_ :List[str] = 0
a_ :List[Any] = 10_00
a_ :Dict = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
a_ :int = pipeline(obs, planning_horizon=32)
# execute action in environment
a_ , a_ , a_ , a_ :List[Any] = env.step(denorm_actions)
a_ :Tuple = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
a_ :List[Any] = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 35 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase : Any = ['''accelerate''', '''launch''']
lowerCamelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase : Optional[int] = '''default_config.yaml'''
lowerCamelCase : Optional[Any] = config_folder / config_file
lowerCamelCase : Optional[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase : Optional[Any] = Path('''tests/test_configs''' )
@classmethod
def lowercase__ ( cls : Any ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Tuple ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
lowerCamelCase : str = '''test-tpu'''
lowerCamelCase : Tuple = '''us-central1-a'''
lowerCamelCase : Optional[int] = '''ls'''
lowerCamelCase : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase : Tuple = '''cd /usr/share'''
lowerCamelCase : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase : Any = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
| 35 | 1 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
a_ :List[str] = logging.get_logger(__name__)
class lowercase ( _UpperCAmelCase ):
def __init__( self : Dict , *_lowercase : Tuple , **_lowercase : Tuple ):
warnings.warn(
'''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use FlavaImageProcessor instead.''' , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 35 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ :List[str] = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 1 |
def a ( A__ , A__ ) -> str:
'''simple docstring'''
return "\n".join(
f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 35 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Optional[int] ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._create_example_records()
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(_lowercase ):
self.assertDictEqual(_lowercase , example_records[i] )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Optional[int] = Dataset.from_list(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : List[Any] ): # checks what happens with missing columns
SCREAMING_SNAKE_CASE__ : List[str] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_lowercase )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def lowercase__ ( self : int ): # checks if the type can be inferred from the second record
SCREAMING_SNAKE_CASE__ : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list([] )
self.assertEqual(len(_lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 35 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ :Union[str, Any] = 16
a_ :Any = 32
def a ( A__ , A__ , A__ , A__ , A__ = 1_6 ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ : Any = DatasetDict(
{
'''train''': dataset['''train'''].select(A__ ),
'''validation''': dataset['''train'''].select(A__ ),
'''test''': dataset['''validation'''],
} )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ : List[str] = datasets.map(
A__ , batched=A__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ : Dict = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1_6
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ : Tuple = 8
else:
SCREAMING_SNAKE_CASE__ : int = None
return tokenizer.pad(
A__ , padding='''longest''' , max_length=A__ , pad_to_multiple_of=A__ , return_tensors='''pt''' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : Optional[int] = DataLoader(
tokenized_datasets['''train'''] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
SCREAMING_SNAKE_CASE__ : Tuple = DataLoader(
tokenized_datasets['''test'''] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader, test_dataloader
def a ( A__ , A__ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
# Download the dataset
SCREAMING_SNAKE_CASE__ : List[Any] = load_dataset('''glue''' , '''mrpc''' )
# Create our splits
SCREAMING_SNAKE_CASE__ : Optional[Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
SCREAMING_SNAKE_CASE__ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ : Union[str, Any] = config['''lr''']
SCREAMING_SNAKE_CASE__ : List[Any] = int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE__ : List[Any] = int(config['''seed'''] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE__ : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE__ : List[Any] = MAX_GPU_BATCH_SIZE
set_seed(A__ )
# New Code #
# Create our folds:
SCREAMING_SNAKE_CASE__ : Optional[Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
SCREAMING_SNAKE_CASE__ : List[str] = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(A__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = get_fold_dataloaders(
A__ , A__ , A__ , A__ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : Optional[int] = AdamW(params=model.parameters() , lr=A__ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=1_0_0 , num_training_steps=(len(A__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Now we train the model
for epoch in range(A__ ):
model.train()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**A__ )
SCREAMING_SNAKE_CASE__ : Tuple = outputs.loss
SCREAMING_SNAKE_CASE__ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**A__ )
SCREAMING_SNAKE_CASE__ : Dict = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A__ , references=A__ , )
SCREAMING_SNAKE_CASE__ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A__ )
# New Code #
# We also run predictions on the test set at the very end
SCREAMING_SNAKE_CASE__ : List[str] = []
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(**A__ )
SCREAMING_SNAKE_CASE__ : List[str] = outputs.logits
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Concatenate this fold's batch predictions into a single tensor.
test_predictions.append(torch.cat(A__ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat(A__ , dim=0 )
SCREAMING_SNAKE_CASE__ : List[str] = torch.stack(A__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ : List[str] = metric.compute(predictions=A__ , references=A__ )
accelerator.print('''Average test metrics from all folds:''' , A__ )
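# Illustrative soft-voting arithmetic for the fold averaging above (hypothetical
# logits): with two folds predicting [0.2, 0.8] and [0.6, 0.4] for one example,
# stacking and summing gives [0.8, 1.2], dividing by the fold count gives
# [0.4, 0.6], and argmax picks class 1.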
def a ( ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=A__ , default=A__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
# New Code #
parser.add_argument('''--num_folds''' , type=A__ , default=3 , help='''The number of splits to perform across the dataset''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| 35 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : Optional[int] , _lowercase : str=0.2 , _lowercase : str=0.2 ):
SCREAMING_SNAKE_CASE__ : List[Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : List[str] = conva_get[:2]
SCREAMING_SNAKE_CASE__ : str = conva_get[2]
SCREAMING_SNAKE_CASE__ : Any = size_pa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rate_w
SCREAMING_SNAKE_CASE__ : Tuple = rate_t
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE__ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE__ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.num_bpa ) + 1
def lowercase__ ( self : Union[str, Any] , _lowercase : Any ):
# save model dict with pickle
SCREAMING_SNAKE_CASE__ : Dict = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_lowercase , '''wb''' ) as f:
pickle.dump(_lowercase , _lowercase )
print(f"""Model saved: {save_path}""" )
@classmethod
def lowercase__ ( cls : Dict , _lowercase : int ):
# read saved model
with open(_lowercase , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = pickle.load(_lowercase ) # noqa: S301
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''size_pooling1''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''num_bp1''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp2''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp3''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''rate_weight''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''rate_thre''' )
# create model instance
SCREAMING_SNAKE_CASE__ : Dict = CNN(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# modify model parameter
SCREAMING_SNAKE_CASE__ : List[str] = model_dic.get('''w_conv1''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''wkj''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''vji''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''thre_conv1''' )
SCREAMING_SNAKE_CASE__ : Any = model_dic.get('''thre_bp2''' )
SCREAMING_SNAKE_CASE__ : List[Any] = model_dic.get('''thre_bp3''' )
return conv_ins
def lowercase__ ( self : str , _lowercase : Optional[int] ):
return 1 / (1 + np.exp(-1 * x ))
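# The method above is the logistic sigmoid: sig(0) = 0.5, and large positive or
# negative inputs saturate toward 1 and 0 respectively.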
def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] ):
return round(_lowercase , 3 )
def lowercase__ ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ):
# convolution process
SCREAMING_SNAKE_CASE__ : Tuple = convs[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = convs[1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.shape(_lowercase )[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_lowercase )
# calculate the feature map of every single kernel, and save them as a list of matrices
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(_lowercase ).reshape(
_lowercase , _lowercase )
data_featuremap.append(_lowercase )
# expanding the data slice to one dimension
SCREAMING_SNAKE_CASE__ : int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asarray(_lowercase )
return focus_list, data_featuremap
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any]="average_pool" ):
# pooling process
SCREAMING_SNAKE_CASE__ : List[str] = len(featuremaps[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = int(size_map / size_pooling )
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_map in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Any = featuremaps[i_map]
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(0 , _lowercase , _lowercase ):
for j_focus in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asmatrix(_lowercase ).reshape(_lowercase , _lowercase )
featuremap_pooled.append(_lowercase )
return featuremap_pooled
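# Worked example for the pooling above (hypothetical 4x4 feature map with
# size_pooling=2 and "average_pool"): each non-overlapping 2x2 window is
# averaged, so a 4x4 map of all ones pools down to a 2x2 map of ones.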
def lowercase__ ( self : Optional[Any] , _lowercase : Optional[Any] ):
# expanding three-dimensional data into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Dict = []
for i in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.shape(data[i] )
SCREAMING_SNAKE_CASE__ : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE__ : Dict = data_listed.getA().tolist()[0]
data_expanded.extend(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(_lowercase )
return data_expanded
def lowercase__ ( self : Tuple , _lowercase : Optional[int] ):
# expanding a matrix into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.asarray(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : str = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowercase__ ( self : List[str] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Dict = 0
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : Any = np.ones((size_map, size_map) )
for i in range(0 , _lowercase , _lowercase ):
for j in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE__ : Dict = i_pool + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.multiply(
_lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowercase )
return pd_all
def lowercase__ ( self : List[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Tuple , _lowercase : int=bool ):
# model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_lowercase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_lowercase )) )
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(_lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE__ : Any = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE__ : str = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : int = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Any = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sig(_lowercase )
# --------------Model Learning------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE__ : Tuple = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.multiply(
np.dot(_lowercase , self.wkj ) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(_lowercase , self.vji )
SCREAMING_SNAKE_CASE__ : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE__ : List[str] = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._calculate_gradient_from_pool(
_lowercase , _lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE__ : Dict = self.rate_weight * np.dot(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# fully connected layer
SCREAMING_SNAKE_CASE__ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
# accumulate the summed error of every single image
SCREAMING_SNAKE_CASE__ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = rp + 1
SCREAMING_SNAKE_CASE__ : List[str] = error_count / patterns
all_mse.append(_lowercase )
def draw_error():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowercase , '''+-''' )
plt.plot(_lowercase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_lowercase , alpha=0.5 )
plt.show()
print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def lowercase__ ( self : Union[str, Any] , _lowercase : int ):
# model predict
SCREAMING_SNAKE_CASE__ : Dict = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_lowercase )) )
for p in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Any = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Tuple = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.sig(_lowercase )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE__ : str = [list(map(self.do_round , _lowercase ) ) for each in produce_out]
return np.asarray(_lowercase )
def lowercase__ ( self : Optional[int] , _lowercase : Tuple ):
# return the image data after the convolution process so it can be inspected
SCREAMING_SNAKE_CASE__ : str = np.asmatrix(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Dict = self.pooling(_lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 35 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def a ( A__ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = SwinConfig(image_size=1_9_2 )
if "base" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 6
SCREAMING_SNAKE_CASE__ : str = 1_2_8
SCREAMING_SNAKE_CASE__ : Optional[Any] = (2, 2, 1_8, 2)
SCREAMING_SNAKE_CASE__ : List[Any] = (4, 8, 1_6, 3_2)
elif "large" in model_name:
SCREAMING_SNAKE_CASE__ : Dict = 1_2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1_9_2
SCREAMING_SNAKE_CASE__ : List[str] = (2, 2, 1_8, 2)
SCREAMING_SNAKE_CASE__ : str = (6, 1_2, 2_4, 4_8)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
SCREAMING_SNAKE_CASE__ : int = window_size
SCREAMING_SNAKE_CASE__ : int = embed_dim
SCREAMING_SNAKE_CASE__ : List[Any] = depths
SCREAMING_SNAKE_CASE__ : Dict = num_heads
return config
def a ( A__ ) -> Union[str, Any]:
'''simple docstring'''
if "encoder.mask_token" in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
SCREAMING_SNAKE_CASE__ : str = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
SCREAMING_SNAKE_CASE__ : int = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
SCREAMING_SNAKE_CASE__ : List[str] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
SCREAMING_SNAKE_CASE__ : Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE__ : int = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE__ : Tuple = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
SCREAMING_SNAKE_CASE__ : int = '''layernorm.weight'''
if name == "encoder.norm.bias":
SCREAMING_SNAKE_CASE__ : List[str] = '''layernorm.bias'''
if "decoder" in name:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = '''swin.''' + name
return name
def a ( A__ , A__ ) -> Union[str, Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ : Any = orig_state_dict.pop(A__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = key.split('''.''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = int(key_split[2] )
SCREAMING_SNAKE_CASE__ : Dict = int(key_split[4] )
SCREAMING_SNAKE_CASE__ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE__ : int = val[:dim, :]
SCREAMING_SNAKE_CASE__ : Optional[Any] = val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ : Dict = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = val[
:dim
]
SCREAMING_SNAKE_CASE__ : Optional[int] = val[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ : List[Any] = val[
-dim:
]
else:
SCREAMING_SNAKE_CASE__ : Tuple = val
return orig_state_dict
def a ( A__ , A__ , A__ , A__ ) -> List[str]:
'''simple docstring'''
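    # Load the original SimMIM checkpoint, build the matching config/model, convert the weights, run a sanity-check forward pass on a COCO image, then optionally save and push to the hub.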
SCREAMING_SNAKE_CASE__ : Tuple = torch.load(A__ , map_location='''cpu''' )['''model''']
SCREAMING_SNAKE_CASE__ : List[Any] = get_swin_config(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = SwinForMaskedImageModeling(A__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = convert_state_dict(A__ , A__ )
model.load_state_dict(A__ )
SCREAMING_SNAKE_CASE__ : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE__ : Dict = ViTImageProcessor(size={'''height''': 1_9_2, '''width''': 1_9_2} )
SCREAMING_SNAKE_CASE__ : Any = Image.open(requests.get(A__ , stream=A__ ).raw )
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(images=A__ , return_tensors='''pt''' )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(**A__ ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A__ )
if push_to_hub:
print(f"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(f"""microsoft/{model_name}""" )
image_processor.push_to_hub(f"""microsoft/{model_name}""" )
if __name__ == "__main__":
a_ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ :str = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 35 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase :
def __init__( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=99 , _lowercase : Optional[int]=13 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : str=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , _lowercase : Any=32 , _lowercase : int=4 , _lowercase : Dict=4 , _lowercase : Dict=30 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : str = pad_token_id
SCREAMING_SNAKE_CASE__ : str = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : int = decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase__ ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
SCREAMING_SNAKE_CASE__ : int = outputs['''past_key_values''']
        # create a hypothetical next token and extend next_input_ids with it
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the new token to the previous input_ids
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , past_key_values=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Tuple = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase : Any = True
lowerCamelCase : int = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : Tuple ):
pass
| 35 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
a_ :str = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
a_ :int = 'sshleifer/student_marian_en_ro_6_1'
a_ :List[str] = 'sshleifer/tiny-mbart'
@require_torch
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : List[str] , _lowercase : Dict=False , _lowercase : Optional[Any]=None , _lowercase : Dict=True , _lowercase : str=True , _lowercase : Optional[Any]=True , _lowercase : Union[str, Any]=True , ):
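        # Quick end-to-end seq2seq run (1 epoch, tiny sequence lengths); sanity-checks the logged eval metrics (eval_bleu present, eval_loss not NaN).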
SCREAMING_SNAKE_CASE__ : Any = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_lowercase , num_train_epochs=1 , distributed=_lowercase , extra_args_str=_lowercase , predict_with_generate=_lowercase , do_train=_lowercase , do_eval=_lowercase , do_predict=_lowercase , )
SCREAMING_SNAKE_CASE__ : List[str] = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [log for log in logs if '''eval_loss''' in log.keys()]
SCREAMING_SNAKE_CASE__ : Tuple = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
SCREAMING_SNAKE_CASE__ : Optional[Any] = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowercase__ ( self : int ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowercase__ ( self : Optional[int] ):
self.run_seqaseq_quick(distributed=_lowercase )
@require_torch_multi_gpu
def lowercase__ ( self : Any ):
self.run_seqaseq_quick(distributed=_lowercase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def lowercase__ ( self : str ):
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def lowercase__ ( self : List[Any] ):
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def lowercase__ ( self : int ):
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_lowercase )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def lowercase__ ( self : Any ):
self.run_seqaseq_quick(
distributed=_lowercase , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_lowercase )
@require_apex
@require_torch_gpu
def lowercase__ ( self : Any ):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
        # specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
        # 2nd main() call, it botches the future eval.
#
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''' )
        # test a 2nd time - was getting eval_loss: nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_lowercase , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def lowercase__ ( self : Optional[int] , _lowercase : Optional[int] ):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
SCREAMING_SNAKE_CASE__ : Dict = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = experiments[experiment_id]
SCREAMING_SNAKE_CASE__ : str = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
SCREAMING_SNAKE_CASE__ : int = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_lowercase , extra_args_str=data['''extra_args_str'''] )
SCREAMING_SNAKE_CASE__ : Tuple = len(re.findall(_lowercase , cl.err ) )
self.assertEqual(_lowercase , data['''n_matches'''] )
@slow
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=10 , distributed=_lowercase , )
# Check metrics
SCREAMING_SNAKE_CASE__ : Any = TrainerState.load_from_json(os.path.join(_lowercase , '''trainer_state.json''' ) ).log_history
SCREAMING_SNAKE_CASE__ : str = [log for log in logs if '''eval_loss''' in log.keys()]
SCREAMING_SNAKE_CASE__ : Tuple = eval_metrics[0]
SCREAMING_SNAKE_CASE__ : Any = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , _lowercase )
# test if do_predict saves generations and metrics
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.listdir(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {os.path.basename(_lowercase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowercase__ ( self : Dict ):
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_lowercase : str ) -> Tuple[int, float]:
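            # Train once with the given optimizer and return the peak / allocated GPU memory deltas (in MB) plus the final train loss.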
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''--skip_memory_metrics 0'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.run_trainer(
max_len=1_28 , model_name=_lowercase , learning_rate=3E-4 , num_train_epochs=1 , optim=_lowercase , distributed=_lowercase , extra_args_str=_lowercase , do_eval=_lowercase , do_predict=_lowercase , n_gpus_to_use=1 , )
# Check metrics
SCREAMING_SNAKE_CASE__ : Any = TrainerState.load_from_json(Path(_lowercase , '''trainer_state.json''' ) ).log_history
SCREAMING_SNAKE_CASE__ : Tuple = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
SCREAMING_SNAKE_CASE__ : str = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
SCREAMING_SNAKE_CASE__ : str = gpu_peak_mem_orig + gpu_alloc_mem_orig
SCREAMING_SNAKE_CASE__ : List[Any] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
SCREAMING_SNAKE_CASE__ : Optional[Any] = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are in `nn.Embedding`, which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings
SCREAMING_SNAKE_CASE__ : List[str] = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
_lowercase , _lowercase , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
_lowercase , _lowercase , f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def lowercase__ ( self : str , _lowercase : int , _lowercase : str , _lowercase : int , _lowercase : float = 3E-3 , _lowercase : str = "adafactor" , _lowercase : bool = False , _lowercase : str = None , _lowercase : int = 0 , _lowercase : bool = True , _lowercase : bool = True , _lowercase : bool = True , _lowercase : bool = True , _lowercase : int = None , ):
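        # Assemble the CLI arguments for run_translation.py and either launch it through torch.distributed.run (distributed) or patch sys.argv and call main() in-process.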
SCREAMING_SNAKE_CASE__ : Any = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
SCREAMING_SNAKE_CASE__ : List[str] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Dict = f"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(_lowercase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(_lowercase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(_lowercase )}
""".split()
SCREAMING_SNAKE_CASE__ : List[Any] = '''
--do_predict
'''.split()
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
SCREAMING_SNAKE_CASE__ : str = get_gpu_count()
SCREAMING_SNAKE_CASE__ : int = get_torch_dist_unique_port()
SCREAMING_SNAKE_CASE__ : Optional[int] = f"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
SCREAMING_SNAKE_CASE__ : int = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_lowercase , env=self.get_env() )
else:
SCREAMING_SNAKE_CASE__ : int = ['''run_translation.py'''] + args
with patch.object(_lowercase , '''argv''' , _lowercase ):
main()
return output_dir
| 35 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = LayoutLMTokenizer
lowerCamelCase : Any = LayoutLMTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : List[Any] = True
def lowercase__ ( self : Optional[int] ):
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Optional[int] , **_lowercase : str ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : str = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ : Any = '''unwanted, running'''
return input_text, output_text
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_lowercase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )
def lowercase__ ( self : str ):
pass
| 35 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ :str = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
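# The optional vision / torch / TF entries below are only registered in the lazy import structure when the corresponding backend is available.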
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Tuple = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[str] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Tuple = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 |
from __future__ import annotations
def a ( A__ , A__ , A__ ) -> dict[str, float]:
'''simple docstring'''
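    # Exactly one of voltage, current or resistance must be 0 (the unknown); the other two are combined via Ohm's law V = I * R to solve for it.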
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
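# Illustrative usage (values chosen for this sketch, not from the source):
#   a(10, 2, 0) -> {'resistance': 5.0}
#   a(0, 2, 5) -> {'voltage': 10.0}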
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase :
def __init__( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=99 , _lowercase : Optional[int]=13 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : str=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , _lowercase : Any=32 , _lowercase : int=4 , _lowercase : Dict=4 , _lowercase : Dict=30 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : str = pad_token_id
SCREAMING_SNAKE_CASE__ : str = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : int = decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase__ ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
SCREAMING_SNAKE_CASE__ : int = outputs['''past_key_values''']
        # create a hypothetical next token and extend next_input_ids with it
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the new token to the previous input_ids
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , past_key_values=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Tuple = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase : Any = True
lowerCamelCase : int = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : Tuple ):
pass
| 35 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
a_ :List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
a_ :Tuple = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
a_ :str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ :List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a_ :Optional[int] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ :Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
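        # Encode each passage as [CLS] <question> [SEP] <title> [SEP] <text>, truncate to max_length when requested, and build the attention mask.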
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
f"""There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
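        # Rank passages by their relevance logit, then collect the best non-overlapping answer spans from each passage until num_spans predictions are gathered.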
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
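        # Score every candidate span (start, start + answer_length) as start_logit + end_logit, sort by score, and keep the top non-overlapping spans.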
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
| 35 | 1 |
from math import log
from scipy.constants import Boltzmann, physical_constants
a_ :Dict = 3_00 # TEMPERATURE (unit = K)
def a ( A__ , A__ , A__ , ) -> float:
'''simple docstring'''
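    # Built-in potential of a p-n junction: V_bi = (k * T / q) * ln(N_d * N_a / n_i^2), with k*T converted to electron volts.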
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
import random
def a ( A__ ) -> bool:
'''simple docstring'''
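    # Miller-Rabin probabilistic primality test: write num - 1 as s * 2**t with s odd, then test 5 random witnesses.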
SCREAMING_SNAKE_CASE__ : str = num - 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = s // 2
t += 1
for _ in range(5 ):
SCREAMING_SNAKE_CASE__ : int = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pow(A__ , A__ , A__ )
if v != 1:
SCREAMING_SNAKE_CASE__ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE__ : Any = i + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (v**2) % num
return True
def a ( A__ ) -> bool:
'''simple docstring'''
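    # Quick pre-check against a table of small primes before falling back to the probabilistic Miller-Rabin test.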
if num < 2:
return False
SCREAMING_SNAKE_CASE__ : Optional[int] = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(A__ )
def a ( A__ = 1_0_2_4 ) -> int:
'''simple docstring'''
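    # Draw random keysize-bit candidates until one passes the primality check.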
while True:
SCREAMING_SNAKE_CASE__ : Any = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(A__ ):
return num
if __name__ == "__main__":
a_ :Dict = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
| 35 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
def __init__( self : Tuple , _lowercase : List[Any] , _lowercase : str=13 , _lowercase : Dict=3 , _lowercase : Union[str, Any]=True , _lowercase : str=True , _lowercase : Dict=0.1 , _lowercase : List[str]=0.1 , _lowercase : Optional[Any]=2_24 , _lowercase : Optional[Any]=10_00 , _lowercase : str=[3, 3, 6, 4] , _lowercase : List[str]=[48, 56, 1_12, 2_20] , ):
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE__ : List[str] = is_training
SCREAMING_SNAKE_CASE__ : str = use_labels
SCREAMING_SNAKE_CASE__ : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any = num_labels
SCREAMING_SNAKE_CASE__ : Any = image_size
SCREAMING_SNAKE_CASE__ : List[Any] = layer_depths
SCREAMING_SNAKE_CASE__ : Optional[Any] = embed_dims
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Tuple = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : int ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowercase , layer_scale_init_value=1E-5 , )
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = SwiftFormerModel(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowercase__ ( self : Optional[Any] , _lowercase : List[str] , _lowercase : Any , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Dict = self.num_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = SwiftFormerForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE__ : List[Any] = SwiftFormerForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Any = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict ):
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase : Tuple = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : str = False
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : str = False
lowerCamelCase : Optional[Any] = False
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[str] = ConfigTester(
self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowercase__ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def lowercase__ ( self : Tuple ):
pass
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Optional[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def lowercase__ ( self : Any ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Any = SwiftFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def lowercase__ ( self : Union[str, Any] ):
pass
def lowercase__ ( self : Union[str, Any] ):
def check_hidden_states_output(_lowercase : Optional[Any] , _lowercase : int , _lowercase : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**self._prepare_for_class(_lowercase , _lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Optional[int] = 8
self.assertEqual(len(_lowercase ) , _lowercase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowercase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : int = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def lowercase__ ( self : Union[str, Any] ):
def _config_zero_init(_lowercase : str ):
SCREAMING_SNAKE_CASE__ : Tuple = copy.deepcopy(_lowercase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowercase , _lowercase , 1E-10 )
if isinstance(getattr(_lowercase , _lowercase , _lowercase ) , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = _config_zero_init(getattr(_lowercase , _lowercase ) )
setattr(_lowercase , _lowercase , _lowercase )
return configs_no_init
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = _config_zero_init(_lowercase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(config=_lowercase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Optional[Any] ):
pass
def a ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Optional[int] ):
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : int = image_processor(images=_lowercase , return_tensors='''pt''' ).to(_lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**_lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 ) )
| 35 |
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a ( A__ ) -> List[Any]:
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def a ( A__ , A__ ) -> Any:
'''simple docstring'''
return (-y * np.log(A__ ) - (1 - y) * np.log(1 - h )).mean()
def a ( A__ , A__ , A__ ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = np.dot(A__ , A__ )
return np.sum(y * scores - np.log(1 + np.exp(A__ ) ) )
def a ( A__ , A__ , A__ , A__=7_0_0_0_0 ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = np.zeros(x.shape[1] )
for iterations in range(A__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : Dict = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : int = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : int = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = cost_function(A__ , A__ )
if iterations % 1_0_0 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
a_ :str = datasets.load_iris()
a_ :Dict = iris.data[:, :2]
a_ :int = (iris.target != 0) * 1
a_ :Dict = 0.1
a_ :str = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing the theta i.e our weights vector
def a ( A__ ) -> int:
'''simple docstring'''
return sigmoid_function(
np.dot(A__ , A__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((a_) , (a_)) :str = (x[:, 0].min(), x[:, 0].max())
((a_) , (a_)) :Tuple = (x[:, 1].min(), x[:, 1].max())
((a_) , (a_)) :Dict = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a_ :Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
a_ :Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
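# Illustrative sanity check (a sketch, not part of the original script). It assumes the
# de-obfuscated names sigmoid_function and cost_function that the training loop above refers to:
# sigmoid(0) is exactly 0.5, and the cross-entropy of h = [0.5, 0.5] against y = [0, 1] is ln(2).
_h_check = sigmoid_function(np.array([0.0, 0.0]))          # -> array([0.5, 0.5])
_j_check = cost_function(_h_check, np.array([0.0, 1.0]))   # -> ln(2) ~= 0.6931
assert np.isclose(_h_check, 0.5).all() and np.isclose(_j_check, np.log(2))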
| 35 | 1 |
import mpmath # for roots of unity
import numpy as np
class lowercase :
def __init__( self : List[str] , _lowercase : Optional[int]=None , _lowercase : Optional[int]=None ):
# Input as list
SCREAMING_SNAKE_CASE__ : List[str] = list(poly_a or [0] )[:]
SCREAMING_SNAKE_CASE__ : Dict = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
SCREAMING_SNAKE_CASE__ : int = len(self.polyB )
# Add 0 to make lengths equal a power of 2
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
SCREAMING_SNAKE_CASE__ : Any = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
SCREAMING_SNAKE_CASE__ : Any = self.__multiply()
def lowercase__ ( self : Optional[int] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : str = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(_lowercase ) <= 1:
return dft[0]
#
SCREAMING_SNAKE_CASE__ : Dict = self.c_max_length // 2
while next_ncol > 0:
SCREAMING_SNAKE_CASE__ : str = [[] for i in range(_lowercase )]
SCREAMING_SNAKE_CASE__ : List[Any] = self.root**next_ncol
# First half of next step
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_lowercase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
SCREAMING_SNAKE_CASE__ : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_lowercase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Dict = new_dft
SCREAMING_SNAKE_CASE__ : Tuple = next_ncol // 2
return dft[0]
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Tuple = self.__dft('''A''' )
SCREAMING_SNAKE_CASE__ : int = self.__dft('''B''' )
SCREAMING_SNAKE_CASE__ : int = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
SCREAMING_SNAKE_CASE__ : int = 2
while next_ncol <= self.c_max_length:
SCREAMING_SNAKE_CASE__ : Any = [[] for i in range(_lowercase )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2)
SCREAMING_SNAKE_CASE__ : List[str] = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Tuple = new_inverse_c
next_ncol *= 2
# Unpack
SCREAMING_SNAKE_CASE__ : Tuple = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
SCREAMING_SNAKE_CASE__ : Optional[int] = '''A = ''' + ''' + '''.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) )
SCREAMING_SNAKE_CASE__ : int = '''B = ''' + ''' + '''.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) )
SCREAMING_SNAKE_CASE__ : List[str] = '''A*B = ''' + ''' + '''.join(
f"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) )
return f"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
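# Illustrative usage sketch (kept as comments because the constructor arguments below are the
# de-obfuscated names poly_a and poly_b referenced in __init__ above, not the names in the
# obfuscated signature):
#   fft = lowercase(poly_a=[1, 2, 3], poly_b=[3, 4])   # (1 + 2x + 3x^2) * (3 + 4x)
#   fft.product  # coefficients close to [3, 10, 17, 12], i.e. 3 + 10x + 17x^2 + 12x^3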
| 35 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a ( A__ ) -> Tuple:
'''simple docstring'''
return EnvironmentCommand()
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = huggingface_hub.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''NA'''
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ : str = '''not installed'''
if is_transformers_available():
import transformers
SCREAMING_SNAKE_CASE__ : Optional[Any] = transformers.__version__
SCREAMING_SNAKE_CASE__ : Any = '''not installed'''
if is_accelerate_available():
import accelerate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerate.__version__
SCREAMING_SNAKE_CASE__ : Tuple = '''not installed'''
if is_xformers_available():
import xformers
SCREAMING_SNAKE_CASE__ : Tuple = xformers.__version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def lowercase__ ( _lowercase : Dict ):
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a_ :Union[str, Any] = None
a_ :str = logging.get_logger(__name__)
a_ :str = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a_ :Union[str, Any] = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a_ :Optional[int] = {
't5-small': 5_12,
't5-base': 5_12,
't5-large': 5_12,
't5-3b': 5_12,
't5-11b': 5_12,
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Any = VOCAB_FILES_NAMES
lowerCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = ['''input_ids''', '''attention_mask''']
lowerCamelCase : Dict = TaTokenizer
lowerCamelCase : List[int] = []
def __init__( self : Optional[int] , _lowercase : Any=None , _lowercase : List[Any]=None , _lowercase : Dict="</s>" , _lowercase : int="<unk>" , _lowercase : int="<pad>" , _lowercase : List[Any]=1_00 , _lowercase : Dict=None , **_lowercase : Tuple , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE__ : List[str] = [f"""<extra_id_{i}>""" for i in range(_lowercase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(set(filter(lambda _lowercase : bool('''extra_id_''' in str(_lowercase ) ) , _lowercase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
_lowercase , tokenizer_file=_lowercase , eos_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , extra_ids=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : int = vocab_file
SCREAMING_SNAKE_CASE__ : List[Any] = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE__ : Tuple = extra_ids
@staticmethod
def lowercase__ ( _lowercase : Any , _lowercase : str , _lowercase : Union[str, Any] ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _lowercase , )
return max_model_length
def lowercase__ ( self : List[str] , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def lowercase__ ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
SCREAMING_SNAKE_CASE__ : Any = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def lowercase__ ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowercase__ ( self : List[str] ):
return list(
set(filter(lambda _lowercase : bool(re.search(R'''<extra_id_\d+>''' , _lowercase ) ) is not None , self.additional_special_tokens ) ) )
def lowercase__ ( self : List[str] ):
return [self.convert_tokens_to_ids(_lowercase ) for token in self.get_sentinel_tokens()]
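# Illustrative sketch of the sequence-building helpers above (the standard
# build_inputs_with_special_tokens / create_token_type_ids_from_sequences overrides), assuming
# prefix_tokens == [] (as declared on the class) and eos_token_id == 1 as in stock T5 checkpoints:
#   single sequence [10, 11]          -> [10, 11, 1]
#   sequence pair   [10, 11] + [12]   -> [10, 11, 1, 12, 1]
#   token type ids for [10, 11]       -> [0, 0, 0]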
| 35 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( A__ , A__ , A__ ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = RemBertConfig.from_json_file(A__ )
print('''Building PyTorch model from configuration: {}'''.format(str(A__ ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = RemBertModel(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(A__ , A__ , A__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(A__ ) )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
a_ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ :Optional[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
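# Example invocation (a sketch; the script name and the paths below are hypothetical):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin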
| 35 | 1 |
def a ( A__ ) -> list:
'''simple docstring'''
if n_term == "":
return []
SCREAMING_SNAKE_CASE__ : list = []
for temp in range(int(A__ ) ):
series.append(f"""1/{temp + 1}""" if series else '''1''' )
return series
if __name__ == "__main__":
a_ :Optional[Any] = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
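# Quick illustrative check (assuming the de-obfuscated name harmonic_series used above):
# harmonic_series(3) returns ['1', '1/2', '1/3'], i.e. the first three terms of 1 + 1/2 + 1/3.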
| 35 |
from sklearn.metrics import recall_score
import datasets
a_ :int = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
a_ :Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
a_ :Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Optional[int]=None , _lowercase : Tuple=1 , _lowercase : List[Any]="binary" , _lowercase : Any=None , _lowercase : Optional[int]="warn" , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = recall_score(
_lowercase , _lowercase , labels=_lowercase , pos_label=_lowercase , average=_lowercase , sample_weight=_lowercase , zero_division=_lowercase , )
return {"recall": float(_lowercase ) if score.size == 1 else score}
| 35 | 1 |
from collections import Counter
from timeit import timeit
def a ( A__ = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def a ( A__ = "" ) -> bool:
'''simple docstring'''
if len(A__ ) == 0:
return True
SCREAMING_SNAKE_CASE__ : int = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
SCREAMING_SNAKE_CASE__ : dict[str, int] = {}
for character in lower_case_input_str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = character_freq_dict.get(A__ , 0 ) + 1
SCREAMING_SNAKE_CASE__ : str = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def a ( A__ = "" ) -> None:
'''simple docstring'''
print('''\nFor string = ''' , A__ , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(A__ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(A__ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
a_ :Tuple = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
a_ :List[Any] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
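# Illustrative check (a sketch, assuming the de-obfuscated function names used in __main__ above):
# a rearrangement into a palindrome exists iff at most one character has an odd count.
assert can_string_be_rearranged_as_palindrome_counter("carerac")   # rearranges to "racecar"
assert not can_string_be_rearranged_as_palindrome_counter("abcdef")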
| 35 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a_ :List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase :
lowerCamelCase : str
lowerCamelCase : List[str]
lowerCamelCase : Optional[List[str]]
@dataclass
class lowercase :
lowerCamelCase : List[int]
lowerCamelCase : List[int]
lowerCamelCase : Optional[List[int]] = None
lowerCamelCase : Optional[List[int]] = None
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = '''train'''
lowerCamelCase : Tuple = '''dev'''
lowerCamelCase : Any = '''test'''
class lowercase :
@staticmethod
def lowercase__ ( _lowercase : Any , _lowercase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : str ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : int=False , _lowercase : Optional[Any]="[CLS]" , _lowercase : Tuple=1 , _lowercase : Optional[Any]="[SEP]" , _lowercase : Tuple=False , _lowercase : Optional[Any]=False , _lowercase : List[Any]=0 , _lowercase : Optional[int]=0 , _lowercase : Optional[Any]=-1_00 , _lowercase : Tuple=0 , _lowercase : Union[str, Any]=True , ):
SCREAMING_SNAKE_CASE__ : Tuple = {label: i for i, label in enumerate(_lowercase )}
SCREAMING_SNAKE_CASE__ : Dict = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , _lowercase , len(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(_lowercase )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ : Tuple = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ : Tuple = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ : str = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ : List[str] = max_seq_length - len(_lowercase )
if pad_on_left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(_lowercase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(_lowercase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(_lowercase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(_lowercase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : List[Any] = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Optional[int]=False , _lowercase : Split = Split.train , ):
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
SCREAMING_SNAKE_CASE__ : Any = torch.load(_lowercase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
SCREAMING_SNAKE_CASE__ : str = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Any = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , _lowercase : List[str] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowercase :
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = -100
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : List[str]=False , _lowercase : Split = Split.train , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : List[str] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Optional[Any] , _lowercase : Union[str, Any] ):
return self.features[i]
| 35 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ :Tuple = logging.get_logger(__name__)
a_ :Any = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[Any] = '''van'''
def __init__( self : List[Any] , _lowercase : int=2_24 , _lowercase : Dict=3 , _lowercase : Tuple=[7, 3, 3, 3] , _lowercase : str=[4, 2, 2, 2] , _lowercase : Tuple=[64, 1_28, 3_20, 5_12] , _lowercase : List[str]=[3, 3, 12, 3] , _lowercase : Union[str, Any]=[8, 8, 4, 4] , _lowercase : Optional[int]="gelu" , _lowercase : Optional[int]=0.02 , _lowercase : List[Any]=1E-6 , _lowercase : Optional[Any]=1E-2 , _lowercase : str=0.0 , _lowercase : Tuple=0.0 , **_lowercase : Tuple , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : int = image_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_sizes
SCREAMING_SNAKE_CASE__ : List[Any] = strides
SCREAMING_SNAKE_CASE__ : str = hidden_sizes
SCREAMING_SNAKE_CASE__ : Dict = depths
SCREAMING_SNAKE_CASE__ : Union[str, Any] = mlp_ratios
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[Any] = layer_scale_init_value
SCREAMING_SNAKE_CASE__ : List[Any] = drop_path_rate
SCREAMING_SNAKE_CASE__ : Any = dropout_rate
| 35 |
import os
def a ( A__ = "matrix.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(A__ ) , A__ ) ) as in_file:
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_file.read()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[int(A__ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE__ : Dict = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE__ : Any = len(grid[0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[0 for i in range(A__ )] for j in range(A__ )]
SCREAMING_SNAKE_CASE__ : Tuple = grid[0][0]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[0][i] + dp[0][i - 1]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[i][0] + dp[i - 1][0]
for i in range(1 , A__ ):
for j in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
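# Illustrative sketch (not part of the original): the same dynamic programme on an in-memory
# grid, where the cheapest top-left to bottom-right path of [[1, 3], [2, 4]] is 1 + 2 + 4 = 7.
def _min_path_sum(grid):
    dp = [row[:] for row in grid]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            if i == 0 and j == 0:
                continue
            candidates = []
            if i > 0:
                candidates.append(dp[i - 1][j])
            if j > 0:
                candidates.append(dp[i][j - 1])
            dp[i][j] += min(candidates)
    return dp[-1][-1]

assert _min_path_sum([[1, 3], [2, 4]]) == 7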
| 35 | 1 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowercase :
def __init__( self : Optional[int] , _lowercase : str , _lowercase : Tuple=13 , _lowercase : Union[str, Any]=32 , _lowercase : str=2 , _lowercase : Dict=3 , _lowercase : str=16 , _lowercase : str=[1, 2, 1] , _lowercase : str=[2, 2, 4] , _lowercase : str=2 , _lowercase : List[str]=2.0 , _lowercase : Optional[int]=True , _lowercase : Union[str, Any]=0.0 , _lowercase : List[str]=0.0 , _lowercase : Optional[int]=0.1 , _lowercase : Optional[int]="gelu" , _lowercase : Union[str, Any]=False , _lowercase : Tuple=True , _lowercase : Any=0.02 , _lowercase : Optional[Any]=1E-5 , _lowercase : Optional[int]=True , _lowercase : str=None , _lowercase : Optional[Any]=True , _lowercase : List[str]=10 , _lowercase : Optional[Any]=8 , _lowercase : Any=["stage1", "stage2", "stage3"] , _lowercase : str=[1, 2, 3] , ):
SCREAMING_SNAKE_CASE__ : List[str] = parent
SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : List[str] = embed_dim
SCREAMING_SNAKE_CASE__ : Any = depths
SCREAMING_SNAKE_CASE__ : int = num_heads
SCREAMING_SNAKE_CASE__ : str = window_size
SCREAMING_SNAKE_CASE__ : Optional[int] = mlp_ratio
SCREAMING_SNAKE_CASE__ : List[Any] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = drop_path_rate
SCREAMING_SNAKE_CASE__ : int = hidden_act
SCREAMING_SNAKE_CASE__ : int = use_absolute_embeddings
SCREAMING_SNAKE_CASE__ : int = patch_norm
SCREAMING_SNAKE_CASE__ : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE__ : str = is_training
SCREAMING_SNAKE_CASE__ : Optional[int] = scope
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder_stride
SCREAMING_SNAKE_CASE__ : Any = out_features
SCREAMING_SNAKE_CASE__ : List[Any] = out_indices
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Dict = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Union[str, Any] ):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowercase__ ( self : Any , _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : Tuple ):
SCREAMING_SNAKE_CASE__ : List[Any] = MaskFormerSwinModel(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
SCREAMING_SNAKE_CASE__ : Any = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase__ ( self : Tuple , _lowercase : int , _lowercase : Any , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Any = MaskFormerSwinBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : str = ['''stem''']
SCREAMING_SNAKE_CASE__ : str = MaskFormerSwinBackbone(config=_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = config_and_inputs
SCREAMING_SNAKE_CASE__ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : str = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase : str = False
lowerCamelCase : List[Any] = False
lowerCamelCase : Any = False
lowerCamelCase : str = False
lowerCamelCase : List[str] = False
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Dict = MaskFormerSwinModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self , config_class=_lowercase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Optional[Any] ):
return
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowercase )
@unittest.skip('''Swin does not use inputs_embeds''' )
def lowercase__ ( self : Dict ):
pass
@unittest.skip('''Swin does not support feedforward chunking''' )
def lowercase__ ( self : Tuple ):
pass
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
def lowercase__ ( self : Union[str, Any] ):
pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
def lowercase__ ( self : List[str] ):
pass
def lowercase__ ( self : List[Any] , _lowercase : int , _lowercase : Optional[Any] , _lowercase : List[Any] , _lowercase : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) )
SCREAMING_SNAKE_CASE__ : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
# Swin has a different seq_length
SCREAMING_SNAKE_CASE__ : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Dict = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase )
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Dict = 3
SCREAMING_SNAKE_CASE__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
SCREAMING_SNAKE_CASE__ : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
SCREAMING_SNAKE_CASE__ : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
SCREAMING_SNAKE_CASE__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def lowercase__ ( self : Any ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def lowercase__ ( self : List[Any] ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_lowercase : Tuple ):
SCREAMING_SNAKE_CASE__ : Tuple = 0
return t
def check_equivalence(_lowercase : int , _lowercase : int , _lowercase : Dict , _lowercase : Optional[int]={} ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**_lowercase , return_dict=_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(**_lowercase , return_dict=_lowercase , **_lowercase ).to_tuple()
def recursive_check(_lowercase : List[Any] , _lowercase : int ):
if isinstance(_lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowercase , _lowercase ):
recursive_check(_lowercase , _lowercase )
elif isinstance(_lowercase , _lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowercase , _lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowercase ) , set_nan_tensor_to_zero(_lowercase ) , atol=1E-5 ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}. Dict has"""
f""" `nan`: {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}."""
) , )
recursive_check(_lowercase , _lowercase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int = model_class(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True} )
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True} )
@require_torch
class lowercase ( unittest.TestCase , _UpperCAmelCase ):
lowerCamelCase : Union[str, Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase : List[Any] = MaskFormerSwinConfig
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : str = MaskFormerSwinModelTester(self )
    def lowercase__ ( self : str ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['''pixel_values'''].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertEqual(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertEqual(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 35 |
from math import factorial
def solution ( n = 2_0 ) -> int:
    '''simple docstring'''
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
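# Illustrative sanity check (not in the original file): the count of lattice
# paths through an n x n grid is the central binomial coefficient C(2n, n),
# so math.comb must agree with the factorial formula above.
from math import comb

assert solution(20) == comb(40, 20) == 137_846_528_820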
| 35 | 1 |
import unittest
from knapsack import knapsack as k
class lowercase ( unittest.TestCase ):
    def lowercase__ ( self : Optional[Any] ):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )

    def lowercase__ ( self : List[str] ):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c ) , 5 )

    def lowercase__ ( self : Union[str, Any] ):
        cap = 50
        val = [60, 1_00, 1_20]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c ) , 2_20 )
if __name__ == "__main__":
unittest.main()
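# The `knapsack` module under test is not included in this file. Below is a
# minimal recursive 0/1 knapsack sketch consistent with the assertions above;
# the (capacity, weights, values, counter) signature is inferred from the tests.
def knapsack(capacity, weights, values, counter):
    if counter == 0 or capacity == 0:
        return 0
    if weights[counter - 1] > capacity:
        # the counter-th item does not fit: skip it
        return knapsack(capacity, weights, values, counter - 1)
    # best of taking vs. skipping the counter-th item
    return max(
        values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )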
| 35 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def hashimage ( image ) -> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def lowercase__ ( self : List[Any] , model : Tuple , tokenizer : Any , processor : List[str] ):
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def lowercase__ ( self : Union[str, Any] , depth_estimator : int , examples : int ):
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , outputs )
        import datasets

        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        outputs = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ] )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''' , model=model_id )
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        outputs['''depth'''] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
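# Minimal standalone usage sketch mirroring the slow test above (illustrative;
# it needs network access to download the Intel/dpt-large checkpoint):
def example() -> None:
    depth_estimator = pipeline('''depth-estimation''' , model='''Intel/dpt-large''' )
    result = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
    result['''depth'''].save('''depth.png''' )  # "depth" is a PIL image; "predicted_depth" is a torch.Tensor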
| 35 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ :Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ :List[Any] = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds
        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""
def downscale_height_and_width ( height , width , scale_factor=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
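# Worked example (illustrative sanity check): for the pipeline's default
# 512x512 request with scale_factor=8, 512 // 8**2 == 8 with no remainder,
# so the latent-aligned size is 8 * 8 == 64 per side.
assert downscale_height_and_width(512, 512) == (64, 64)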
class lowercase ( _UpperCAmelCase ):
def __init__( self : Optional[Any] , _lowercase : UNetaDConditionModel , _lowercase : DDPMScheduler , _lowercase : VQModel , ):
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device(f"""cuda:{gpu_id}""" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        device = torch.device(f"""cuda:{gpu_id}""" )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowercase__ ( self : Optional[Any] ):
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self : Dict , _lowercase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase : Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase : int = 5_12 , _lowercase : int = 5_12 , _lowercase : int = 1_00 , _lowercase : float = 4.0 , _lowercase : int = 1 , _lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase : Optional[torch.FloatTensor] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._execution_device
SCREAMING_SNAKE_CASE__ : Optional[Any] = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat(_lowercase , dim=0 )
SCREAMING_SNAKE_CASE__ : Any = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Any = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : Tuple = image_embeds.repeat_interleave(_lowercase , dim=0 )
SCREAMING_SNAKE_CASE__ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
SCREAMING_SNAKE_CASE__ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.scheduler.timesteps
SCREAMING_SNAKE_CASE__ : Dict = self.unet.config.in_channels
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
# create initial latent
SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowercase , _lowercase , _lowercase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'''image_embeds''': image_embeds}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE__ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
SCREAMING_SNAKE_CASE__ : Tuple = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE__ : List[str] = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE__ : Optional[int] = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ : str = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 35 |
def a ( A__ ) -> int:
    '''simple docstring'''
    if A__ < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif not isinstance(A__ , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return bin(A__ ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
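# Illustrative example (not in the original file): 25 == 0b11001 has three set bits.
assert a(25) == 3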
| 35 | 1 |
from __future__ import annotations

def is_9_pandigital ( n ) -> bool:
    '''simple docstring'''
    base_str = str(n )
    return len(base_str ) == 9 and set(base_str ) == set('''123456789''' )

def solution ( ) -> int | None:
    '''simple docstring'''
    for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
        candidate = 1_0_0_0_0_2 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(3_3_3 , 9_9 , -1 ):
        candidate = 1_0_0_2_0_0_3 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
if __name__ == "__main__":
print(F'''{solution() = }''')
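# Illustrative check (not in the original file): 192 concatenated with
# 192 * 2 = 384 and 192 * 3 = 576 gives 192384576, a 1-9 pandigital number,
# so the predicate must accept it.
assert is_9_pandigital(192_384_576)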
| 35 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def get_resize_output_image_size ( input_image , output_size , keep_aspect_ratio , multiple ) -> Tuple[int, int]:
    '''simple docstring'''
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height, input_width = get_image_size(input_image )
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
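# Worked example (illustrative; a dummy channels-first array stands in for a
# real image): a 480x640 input resized toward 384x384 with keep_aspect_ratio
# fits the height (scale 0.8), yielding (384, 512) -- both multiples of 32.
def _resize_example() -> Tuple[int, int]:
    dummy = np.zeros((3, 480, 640) , dtype=np.uint8 )
    return get_resize_output_image_size(dummy , (384, 384) , keep_aspect_ratio=True , multiple=32 )  # (384, 512)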
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
def __init__( self : List[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[str] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(
_lowercase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Tuple , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Any = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Tuple = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Any = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : List[Tuple] = None ):
SCREAMING_SNAKE_CASE__ : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : Tuple = []
for idx in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 35 | 1 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowercase ( unittest.TestCase ):
def __init__( self : Dict , _lowercase : Tuple , _lowercase : Optional[Any]=2 , _lowercase : int=56 , _lowercase : List[str]=True , _lowercase : Tuple=True , _lowercase : Optional[Any]=True , _lowercase : Optional[Any]=True , _lowercase : List[str]=99 , _lowercase : Dict=32 , _lowercase : Dict=2 , _lowercase : str=2 , _lowercase : Union[str, Any]=7 , _lowercase : Union[str, Any]="gelu_new" , _lowercase : List[Any]=0.1 , _lowercase : str=0.1 , _lowercase : Dict=5_12 , _lowercase : List[Any]=16 , _lowercase : Optional[int]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[int]=4 , _lowercase : str="block_sparse" , _lowercase : str=True , _lowercase : List[str]=False , _lowercase : Any=2 , _lowercase : Tuple=3 , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : int = batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = seq_length
SCREAMING_SNAKE_CASE__ : Tuple = is_training
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_attention_mask
SCREAMING_SNAKE_CASE__ : int = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Dict = use_labels
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Any = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Any = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_choices
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = attention_type
SCREAMING_SNAKE_CASE__ : List[Any] = use_bias
SCREAMING_SNAKE_CASE__ : int = block_size
SCREAMING_SNAKE_CASE__ : Dict = num_random_blocks
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : List[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_flax
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : str = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
lowerCamelCase : int = False
lowerCamelCase : Optional[int] = False
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Tuple = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase__ ( self : Optional[int] ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase__ ( self : Tuple ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase__ ( self : Optional[int] ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase__ ( self : Tuple ):
super().test_hidden_states_output()
@slow
def lowercase__ ( self : Tuple ):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(_lowercase )
def lowercase__ ( self : Union[str, Any] ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : int = model_class(_lowercase )
@jax.jit
def model_jitted(_lowercase : Optional[int] , _lowercase : List[Any]=None , **_lowercase : Tuple ):
return model(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_jitted(**_lowercase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase__ ( self : List[Any] , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : str , _lowercase : Tuple=1E-5 , _lowercase : Dict="outputs" , _lowercase : Optional[Any]=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
| 35 |
from __future__ import annotations
from typing import Any
class lowercase :
    def __init__( self , num_of_nodes : int ):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge( self , u_node : int , v_node : int , weight : int ):
        self.m_edges.append([u_node, v_node, weight] )

    def find_component( self , u_node : int ):
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )

    def set_component( self , u_node : int ):
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )

    def union( self , component_size : list[int] , u_node : int , v_node : int ):
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )

    def boruvka( self ):
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
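# Minimal usage sketch (illustrative; the class name was mangled to `lowercase`
# in this file, and the 3-node graph here is made up for demonstration):
def example() -> None:
    g = lowercase(3 )
    g.add_edge(0 , 1 , 5 )
    g.add_edge(1 , 2 , 1 )
    g.add_edge(0 , 2 , 2 )
    g.boruvka()  # picks edges (1, 2) and (0, 2) for a total MST weight of 3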
| 35 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main ( ) -> Any:
    '''simple docstring'''
    parser = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='''accelerate command helpers''' )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
| 35 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ :Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 1 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card ( model_card_dir , src_lang , tgt_lang , model_name ) -> int:
    '''simple docstring'''
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''wmt16-en-de-dist-12-1''': [2_8.3, 2_7.5_2],
'''wmt16-en-de-dist-6-1''': [2_7.4, 2_7.1_1],
'''wmt16-en-de-12-1''': [2_6.9, 2_5.7_5],
}
    pair = f"""{src_lang}-{tgt_lang}"""
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True , exist_ok=True )
    path = os.path.join(model_card_dir , '''README.md''' )
    print(f"""Generating {path}""" )
    with open(path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(readme )
# make sure we are under the root of the project
a_ :Optional[Any] = Path(__file__).resolve().parent.parent.parent
a_ :Union[str, Any] = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
a_ :Union[str, Any] = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
| 35 |
def base16_encode ( data ) -> str:
    '''simple docstring'''
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )

def base16_decode ( data ) -> bytes:
    '''simple docstring'''
    if (len(data ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
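# Round-trip example (illustrative): b"Hello" encodes to "48656C6C6F" and
# decoding that string returns the original bytes.
assert base16_encode(b"Hello") == "48656C6C6F"
assert base16_decode("48656C6C6F") == b"Hello"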
| 35 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ :List[str] = logging.get_logger(__name__)
a_ :str = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : int = '''roberta-prelayernorm'''
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowercase ( _UpperCAmelCase ):
@property
def lowercase__ ( self : Optional[Any] ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE__ : int = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 35 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase : Any = ['''accelerate''', '''launch''']
lowerCamelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase : Optional[int] = '''default_config.yaml'''
lowerCamelCase : Optional[Any] = config_folder / config_file
lowerCamelCase : Optional[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase : Optional[Any] = Path('''tests/test_configs''' )
@classmethod
def lowercase__ ( cls : Any ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Tuple ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
lowerCamelCase : str = '''test-tpu'''
lowerCamelCase : Tuple = '''us-central1-a'''
lowerCamelCase : Optional[int] = '''ls'''
lowerCamelCase : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase : Tuple = '''cd /usr/share'''
lowerCamelCase : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase : Any = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
| 35 | 1 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
a_ :Tuple = object()
# For specifying empty leaf dict `{}`
a_ :str = object()
def _match ( qs , ks ) -> bool:
    '''simple docstring'''
    qts = tuple(re.compile(x + '''$''' ) for x in qs )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False

def _replacement_rules ( rules ):
    '''simple docstring'''
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val

    return replace
def _get_partition_rules ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , A__ )),
(("transformer", "wte", "embedding"), P('''mp''' , A__ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(A__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , A__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(A__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , A__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions ( in_dict ):
    '''simple docstring'''
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
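# Illustrative check of the matcher (the parameter path here is a plausible
# flattened GPT-style key, assumed for demonstration): a sliding window of the
# key matches the ("attention", "out_proj", "kernel") rule above.
assert _match(("attention", "out_proj", "kernel"), ("transformer", "h", "0", "attention", "out_proj", "kernel"))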
| 35 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ :List[str] = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ :str = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Tuple = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Dict = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Union[str, Any] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a_ :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 35 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Optional[int] ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._create_example_records()
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(_lowercase ):
self.assertDictEqual(_lowercase , example_records[i] )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Optional[int] = Dataset.from_list(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : List[Any] ): # checks what happens with missing columns
SCREAMING_SNAKE_CASE__ : List[str] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_lowercase )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def lowercase__ ( self : int ): # checks if the type can be inferred from the second record
SCREAMING_SNAKE_CASE__ : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list([] )
self.assertEqual(len(_lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
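# Added usage sketch (assumes the `datasets` package is installed): the
# schema-from-records behaviour the tests above assert, outside of unittest.
def _from_list_demo():
    ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
    return ds.column_names  # ['col_1', 'col_2'], inferred from the records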
| 35 | 1 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase : Any = ['''accelerate''', '''launch''']
lowerCamelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase : Optional[int] = '''default_config.yaml'''
lowerCamelCase : Optional[Any] = config_folder / config_file
lowerCamelCase : Optional[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase : Optional[Any] = Path('''tests/test_configs''' )
@classmethod
def lowercase__ ( cls : Any ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Tuple ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
lowerCamelCase : str = '''test-tpu'''
lowerCamelCase : Tuple = '''us-central1-a'''
lowerCamelCase : Optional[int] = '''ls'''
lowerCamelCase : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase : Tuple = '''cd /usr/share'''
lowerCamelCase : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase : Any = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
| 35 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : Optional[int] , _lowercase : str=0.2 , _lowercase : str=0.2 ):
SCREAMING_SNAKE_CASE__ : List[Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : List[str] = conva_get[:2]
SCREAMING_SNAKE_CASE__ : str = conva_get[2]
SCREAMING_SNAKE_CASE__ : Any = size_pa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rate_w
SCREAMING_SNAKE_CASE__ : Tuple = rate_t
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE__ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE__ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.num_bpa ) + 1
def lowercase__ ( self : Union[str, Any] , _lowercase : Any ):
# save model dict with pickle
SCREAMING_SNAKE_CASE__ : Dict = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_lowercase , '''wb''' ) as f:
pickle.dump(_lowercase , _lowercase )
print(f"""Model saved: {save_path}""" )
@classmethod
def lowercase__ ( cls : Dict , _lowercase : int ):
# read saved model
with open(_lowercase , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = pickle.load(_lowercase ) # noqa: S301
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''size_pooling1''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''num_bp1''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp2''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp3''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''rate_weight''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''rate_thre''' )
# create model instance
SCREAMING_SNAKE_CASE__ : Dict = CNN(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# modify model parameter
SCREAMING_SNAKE_CASE__ : List[str] = model_dic.get('''w_conv1''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''wkj''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''vji''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''thre_conv1''' )
SCREAMING_SNAKE_CASE__ : Any = model_dic.get('''thre_bp2''' )
SCREAMING_SNAKE_CASE__ : List[Any] = model_dic.get('''thre_bp3''' )
return conv_ins
def lowercase__ ( self : str , _lowercase : Optional[int] ):
return 1 / (1 + np.exp(-1 * x ))
def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] ):
return round(_lowercase , 3 )
def lowercase__ ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ):
# convolution process
SCREAMING_SNAKE_CASE__ : Tuple = convs[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = convs[1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.shape(_lowercase )[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_lowercase )
        # calculate the feature map of every single kernel, and save it as a list of matrices
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(_lowercase ).reshape(
_lowercase , _lowercase )
data_featuremap.append(_lowercase )
        # expanding the data slice to one dimension
SCREAMING_SNAKE_CASE__ : int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asarray(_lowercase )
return focus_list, data_featuremap
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any]="average_pool" ):
# pooling process
SCREAMING_SNAKE_CASE__ : List[str] = len(featuremaps[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = int(size_map / size_pooling )
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_map in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Any = featuremaps[i_map]
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(0 , _lowercase , _lowercase ):
for j_focus in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asmatrix(_lowercase ).reshape(_lowercase , _lowercase )
featuremap_pooled.append(_lowercase )
return featuremap_pooled
def lowercase__ ( self : Optional[Any] , _lowercase : Optional[Any] ):
        # expanding three-dimensional data into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Dict = []
for i in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.shape(data[i] )
SCREAMING_SNAKE_CASE__ : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE__ : Dict = data_listed.getA().tolist()[0]
data_expanded.extend(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(_lowercase )
return data_expanded
def lowercase__ ( self : Tuple , _lowercase : Optional[int] ):
        # expanding a matrix into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.asarray(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : str = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowercase__ ( self : List[str] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Dict = 0
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : Any = np.ones((size_map, size_map) )
for i in range(0 , _lowercase , _lowercase ):
for j in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE__ : Dict = i_pool + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.multiply(
_lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowercase )
return pd_all
def lowercase__ ( self : List[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Tuple , _lowercase : int=bool ):
        # model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_lowercase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_lowercase )) )
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(_lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE__ : Any = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE__ : str = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : int = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Any = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sig(_lowercase )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE__ : Tuple = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.multiply(
np.dot(_lowercase , self.wkj ) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(_lowercase , self.vji )
SCREAMING_SNAKE_CASE__ : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE__ : List[str] = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._calculate_gradient_from_pool(
_lowercase , _lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE__ : Dict = self.rate_weight * np.dot(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layers
SCREAMING_SNAKE_CASE__ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the summed error for this single image
SCREAMING_SNAKE_CASE__ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = rp + 1
SCREAMING_SNAKE_CASE__ : List[str] = error_count / patterns
all_mse.append(_lowercase )
def draw_error():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowercase , '''+-''' )
plt.plot(_lowercase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_lowercase , alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def lowercase__ ( self : Union[str, Any] , _lowercase : int ):
        # model prediction
SCREAMING_SNAKE_CASE__ : Dict = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_lowercase )) )
for p in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Any = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Tuple = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.sig(_lowercase )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE__ : str = [list(map(self.do_round , _lowercase ) ) for each in produce_out]
return np.asarray(_lowercase )
def lowercase__ ( self : Optional[int] , _lowercase : Tuple ):
        # return the image data after the convolution process so we can inspect it
SCREAMING_SNAKE_CASE__ : str = np.asmatrix(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Dict = self.pooling(_lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
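# Added standalone illustration (independent of the name-mangled class above):
# the average-pooling step, vectorised with numpy, on a 4x4 feature map with
# 2x2 windows.
def _pooling_demo():
    feature_map = np.arange(16, dtype=float).reshape(4, 4)
    k = 2  # pooling window size
    return feature_map.reshape(4 // k, k, 4 // k, k).mean(axis=(1, 3))  # [[2.5, 4.5], [10.5, 12.5]]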
| 35 | 1 |
import os
def a ( A__ = "matrix.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(A__ ) , A__ ) ) as in_file:
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_file.read()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[int(A__ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE__ : Dict = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE__ : Any = len(grid[0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[0 for i in range(A__ )] for j in range(A__ )]
SCREAMING_SNAKE_CASE__ : Tuple = grid[0][0]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[0][i] + dp[0][i - 1]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[i][0] + dp[i - 1][0]
for i in range(1 , A__ ):
for j in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
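def _dp_sanity_check() -> None:
    # Added sanity check (illustrative, independent of matrix.txt): the same
    # right/down recurrence on a 2x2 grid; the cheapest path through
    # [[1, 3], [2, 4]] is 1 -> 2 -> 4 = 7.
    tiny = [[1, 3], [2, 4]]
    dp = [[tiny[0][0], 0], [0, 0]]
    dp[0][1] = tiny[0][1] + dp[0][0]
    dp[1][0] = tiny[1][0] + dp[0][0]
    dp[1][1] = tiny[1][1] + min(dp[0][1], dp[1][0])
    assert dp[1][1] == 7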
if __name__ == "__main__":
print(F'''{solution() = }''')
| 35 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase :
def __init__( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=99 , _lowercase : Optional[int]=13 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : str=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , _lowercase : Any=32 , _lowercase : int=4 , _lowercase : Dict=4 , _lowercase : Dict=30 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : str = pad_token_id
SCREAMING_SNAKE_CASE__ : str = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : int = decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase__ ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
SCREAMING_SNAKE_CASE__ : int = outputs['''past_key_values''']
        # create a hypothetical next token and extend next_input_ids with it
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the next tokens to input_ids
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , past_key_values=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Tuple = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase : Any = True
lowerCamelCase : int = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : Tuple ):
pass
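# Added stand-in (assumes torch is available): ids_tensor, imported above,
# builds a random batch of token ids; a minimal equivalent looks like this.
def _random_ids(shape, vocab_size):
    return torch.randint(0, vocab_size, shape, dtype=torch.long)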
| 35 | 1 |
def a ( A__ ) -> int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
SCREAMING_SNAKE_CASE__ : List[Any] = grid[0]
for row_n in range(1 , len(A__ ) ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[row_n]
SCREAMING_SNAKE_CASE__ : Dict = fill_row(A__ , A__ )
SCREAMING_SNAKE_CASE__ : Tuple = grid[row_n]
return grid[-1][-1]
def a ( A__ , A__ ) -> list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(A__ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
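# Added hand-check (the top-level names above are mangled in this dump, so the
# recurrence is re-derived directly): grid [[1, 3], [2, 4]] -> first row
# prefix [1, 4]; second row becomes [3, 7]; answer 7.
def _min_path_demo() -> int:
    grid = [[1, 3], [2, 4]]
    grid[0][1] += grid[0][0]
    grid[1][0] += grid[0][0]
    grid[1][1] += min(grid[1][0], grid[0][1])
    assert grid[1][1] == 7
    return grid[1][1]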
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = LayoutLMTokenizer
lowerCamelCase : Any = LayoutLMTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : List[Any] = True
def lowercase__ ( self : Optional[int] ):
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Optional[int] , **_lowercase : str ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : str = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ : Any = '''unwanted, running'''
return input_text, output_text
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_lowercase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )
def lowercase__ ( self : str ):
pass
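# Added standalone sketch: the greedy longest-match-first WordPiece split that
# the tokenize test above asserts; the vocabulary and word below mirror the
# toy fixture.
def _wordpiece(word, vocab):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = ("##" if start > 0 else "") + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:  # no vocabulary piece matched at this position
            return ["[UNK]"]
        start = end
    return pieces
# _wordpiece("unwanted", {"un", "##want", "##ed"}) -> ['un', '##want', '##ed']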
| 35 | 1 |
from __future__ import annotations
from typing import Any
class lowercase :
def __init__( self : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : List[str] = num_of_nodes
SCREAMING_SNAKE_CASE__ : list[list[int]] = []
SCREAMING_SNAKE_CASE__ : dict[int, int] = {}
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Optional[int] , _lowercase : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[Any] , _lowercase : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
SCREAMING_SNAKE_CASE__ : Any = self.find_component(_lowercase )
def lowercase__ ( self : int , _lowercase : list[int] , _lowercase : int , _lowercase : int ):
if component_size[u_node] <= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : Dict = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowercase )
elif component_size[u_node] >= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.find_component(_lowercase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE__ : List[str] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = edge
SCREAMING_SNAKE_CASE__ : Tuple = self.m_component[u]
SCREAMING_SNAKE_CASE__ : List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE__ : int = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = edge
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[u]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowercase , _lowercase , _lowercase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
SCREAMING_SNAKE_CASE__ : List[Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
from __future__ import annotations
def a ( A__ , A__ , A__ ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
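# Added worked example (independent of the mangled function name above): with
# V = 10 V and I = 2 A, the relation V = I * R gives R = V / I = 5 ohms, which
# corresponds to the {"resistance": ...} branch.
def _ohms_law_demo() -> float:
    voltage, current = 10.0, 2.0
    return voltage / current  # 5.0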
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
a_ :List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
a_ :Tuple = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
a_ :str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ :List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a_ :Optional[int] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ :Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
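# Added side note (standalone, illustrative values): the attention mask
# assembled in __call__ below is a per-token "not padding" indicator.
def _attention_mask_demo():
    pad_token_id = 0  # hypothetical pad id
    input_ids = [101, 2054, 102, 0, 0]
    return [int(t != pad_token_id) for t in input_ids]  # -> [1, 1, 1, 0, 0]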
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
f"""There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda x : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
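# Hedged usage sketch (not part of the original file; the checkpoint name is
# illustrative): the class above mirrors transformers' DPRReaderTokenizer, whose
# combined call produces the [CLS] question [SEP] title [SEP] text layout
# documented in the docstring at the top of this file.
from transformers import DPRReaderTokenizer

reader_tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base')
encoded = reader_tokenizer(
    questions='What does a DPR reader consume?',
    titles='Dense Passage Retrieval',
    texts='The reader scores answer spans inside each retrieved passage.',
    padding=True,
    return_tensors='pt',
)
# encoded['input_ids'] has shape (n_passages, sequence_length)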
| 35 | 1 |
def a ( ) -> int:
'''Return a * b * c for the Pythagorean triplet with a + b + c = 1000 (Project Euler 9).'''
return [
a * b * (1_0_0_0 - a - b)
for a in range(1 , 9_9_9 )
for b in range(a , 9_9_9 )
if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'''{solution() = }''')
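# Hedged side note (standard Project Euler 9 algebra, not part of the original
# file): fixing `a` and eliminating c via a + b + c = 1000 gives a closed form
# for b, replacing the quadratic scan above with a single loop.
def solution_closed_form(total: int = 1_0_0_0) -> int:
    for a in range(1, total // 3):
        numerator = total * (total - 2 * a)  # from a**2 + b**2 == (total - a - b)**2
        denominator = 2 * (total - a)
        if numerator % denominator == 0:
            b = numerator // denominator
            return a * b * (total - a - b)
    raise ValueError('no Pythagorean triplet sums to total')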
| 35 |
import random
def a ( A__ ) -> bool:
'''Miller-Rabin probabilistic primality test using five random witnesses.'''
SCREAMING_SNAKE_CASE__ : str = num - 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = s // 2
t += 1
for _ in range(5 ):
SCREAMING_SNAKE_CASE__ : int = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pow(A__ , A__ , A__ )
if v != 1:
SCREAMING_SNAKE_CASE__ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE__ : Any = i + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (v**2) % num
return True
def a ( A__ ) -> bool:
'''Check primality: trial-divide by the primes below 1000, then fall back to Miller-Rabin.'''
if num < 2:
return False
SCREAMING_SNAKE_CASE__ : Optional[int] = [
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(A__ )
def a ( A__ = 1_0_2_4 ) -> int:
'''Return a random prime of roughly keysize bits (default 1024).'''
while True:
SCREAMING_SNAKE_CASE__ : Any = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(A__ ):
return num
if __name__ == "__main__":
a_ :Dict = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
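# Hedged illustration (not part of the original file) of the decomposition the
# Miller-Rabin test above relies on: num - 1 == 2**t * s with s odd.
def decompose(num: int) -> tuple:
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    return s, t

assert decompose(97) == (3, 5)  # 96 == 2**5 * 3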
| 35 | 1 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
a_ :List[Any] = logging.get_logger(__name__)
class lowercase ( _UpperCAmelCase ):
def __init__( self : List[str] , *_lowercase : Tuple , **_lowercase : Optional[Any] ):
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 35 |
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a ( A__ ) -> List[Any]:
'''Logistic sigmoid: 1 / (1 + e^(-z)).'''
return 1 / (1 + np.exp(-z ))
def a ( A__ , A__ ) -> Any:
'''Binary cross-entropy loss between predictions h and labels y.'''
return (-y * np.log(A__ ) - (1 - y) * np.log(1 - h )).mean()
def a ( A__ , A__ , A__ ) -> Tuple:
'''Bernoulli log-likelihood of the labels under the linear scores.'''
SCREAMING_SNAKE_CASE__ : str = np.dot(A__ , A__ )
return np.sum(y * scores - np.log(1 + np.exp(A__ ) ) )
def a ( A__ , A__ , A__ , A__=7_0_0_0_0 ) -> Tuple:
'''Fit the weights theta by batch gradient descent on the logistic loss.'''
SCREAMING_SNAKE_CASE__ : int = np.zeros(x.shape[1] )
for iterations in range(A__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : Dict = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : int = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : int = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = cost_function(A__ , A__ )
if iterations % 1_0_0 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
a_ :str = datasets.load_iris()
a_ :Dict = iris.data[:, :2]
a_ :int = (iris.target != 0) * 1
a_ :Dict = 0.1
a_ :str = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing the theta i.e our weights vector
def a ( A__ ) -> int:
'''Predict class probabilities from features using the fitted weights.'''
return sigmoid_function(
np.dot(A__ , A__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((a_) , (a_)) :str = (x[:, 0].min(), x[:, 0].max())
((a_) , (a_)) :Tuple = (x[:, 1].min(), x[:, 1].max())
((a_) , (a_)) :Dict = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a_ :Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
a_ :Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
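# Hedged cross-check (scikit-learn is already a dependency above; the solver
# defaults here are assumptions): compare the hand-rolled fit against sklearn's
# reference implementation on the same two iris features.
from sklearn.linear_model import LogisticRegression

reference_model = LogisticRegression(max_iter=1_00_00).fit(x, y)
print('sklearn coefficients:', reference_model.coef_)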
| 35 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ :Optional[Any] = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Union[str, Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
a_ :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
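# Hedged miniature (an assumption about behaviour, not _LazyModule's actual
# implementation): attribute access triggers the real import, which is what
# keeps importing the package cheap despite the large import structure above.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item):
        # Only reached when normal attribute lookup fails: do the real import now.
        for submodule, names in self._import_structure.items():
            if item in names:
                return getattr(importlib.import_module(submodule), item)
        raise AttributeError(f'module {self.__name__!r} has no attribute {item!r}')


assert MiniLazyModule('m', {'json': ['dumps']}).dumps({'a': 1}) == '{"a": 1}'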
| 35 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a ( A__ ) -> Tuple:
'''Factory used by the `diffusers-cli env` subcommand parser.'''
return EnvironmentCommand()
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = huggingface_hub.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''NA'''
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ : str = '''not installed'''
if is_transformers_available():
import transformers
SCREAMING_SNAKE_CASE__ : Optional[Any] = transformers.__version__
SCREAMING_SNAKE_CASE__ : Any = '''not installed'''
if is_accelerate_available():
import accelerate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerate.__version__
SCREAMING_SNAKE_CASE__ : Tuple = '''not installed'''
if is_xformers_available():
import xformers
SCREAMING_SNAKE_CASE__ : Tuple = xformers.__version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def lowercase__ ( _lowercase : Dict ):
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 | 1 |
def a ( A__ , A__ ) -> list:
'''Return every start index at which pattern occurs in s (naive O(len(s) * len(pattern)) scan).'''
SCREAMING_SNAKE_CASE__ : int = len(A__ )
SCREAMING_SNAKE_CASE__ : int = []
for i in range(len(A__ ) - pat_len + 1 ):
SCREAMING_SNAKE_CASE__ : Any = True
for j in range(A__ ):
if s[i + j] != pattern[j]:
SCREAMING_SNAKE_CASE__ : Any = False
break
if match_found:
position.append(A__ )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
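# Hedged follow-up (not part of the original file; assumes the helper above
# behaves as its de-obfuscated original): the naive scan is worst-case
# O(len(s) * len(pattern)), and its first hit can be cross-checked with str.find.
s, pattern = 'a' * 20 + 'b', 'a' * 10 + 'b'
assert naive_pattern_search(s, pattern)[0] == s.find(pattern) == 10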
| 35 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( A__ , A__ , A__ ) -> Union[str, Any]:
'''Build a RemBERT model from the JSON config, load the TF checkpoint weights, and save the PyTorch state dict.'''
SCREAMING_SNAKE_CASE__ : Tuple = RemBertConfig.from_json_file(A__ )
print('''Building PyTorch model from configuration: {}'''.format(str(A__ ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = RemBertModel(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(A__ , A__ , A__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(A__ ) )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
a_ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ :Optional[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
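# Hedged usage sketch (paths are illustrative placeholders):
#
# python convert_rembert_tf_checkpoint_to_pytorch.py \
# --tf_checkpoint_path /path/to/rembert/model.ckpt \
# --rembert_config_file /path/to/rembert_config.json \
# --pytorch_dump_path /path/to/pytorch_model.bin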
| 35 | 1 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : List[Any] , **_lowercase : List[str] ):
pass
def a ( A__ ) -> str:
'''Return a short MD5 fingerprint of an image, used for compact comparisons in tests.'''
SCREAMING_SNAKE_CASE__ : Tuple = hashlib.md5(image.tobytes() )
return m.hexdigest()[:1_0]
def a ( A__ ) -> Dict:
'''Summarise a mask as its image hash and array shape.'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array(A__ )
SCREAMING_SNAKE_CASE__ : Dict = npimg.shape
return {"hash": hashimage(A__ ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : Dict = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
lowerCamelCase : Union[str, Any] = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowercase__ ( self : List[str] , _lowercase : Any , _lowercase : str , _lowercase : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = MaskGenerationPipeline(model=_lowercase , image_processor=_lowercase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : List[Any] , _lowercase : int , _lowercase : Dict ):
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def lowercase__ ( self : Tuple ):
pass
@slow
@require_torch
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : str = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
SCREAMING_SNAKE_CASE__ : List[Any] = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=2_56 )
# Shortening by hashing
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(_lowercase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_80, 6_40)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (4_80, 6_40)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (4_80, 6_40)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (4_80, 6_40)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (4_80, 6_40)}, '''scores''': 0.9262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (4_80, 6_40)}, '''scores''': 0.8999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (4_80, 6_40)}, '''scores''': 0.8986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (4_80, 6_40)}, '''scores''': 0.8984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (4_80, 6_40)}, '''scores''': 0.8873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (4_80, 6_40)}, '''scores''': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Tuple = '''facebook/sam-vit-huge'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''mask-generation''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=2_56 )
# Shortening by hashing
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(_lowercase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (4_80, 6_40)}, '''scores''': 1.0053},
] , )
| 35 |
from sklearn.metrics import recall_score
import datasets
a_ :int = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
a_ :Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
a_ :Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Optional[int]=None , _lowercase : Tuple=1 , _lowercase : List[Any]="binary" , _lowercase : Any=None , _lowercase : Optional[int]="warn" , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = recall_score(
_lowercase , _lowercase , labels=_lowercase , pos_label=_lowercase , average=_lowercase , sample_weight=_lowercase , zero_division=_lowercase , )
return {"recall": float(_lowercase ) if score.size == 1 else score}
| 35 | 1 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Any = CustomTokenizer
pass
| 35 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a_ :List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase :
lowerCamelCase : str
lowerCamelCase : List[str]
lowerCamelCase : Optional[List[str]]
@dataclass
class lowercase :
lowerCamelCase : List[int]
lowerCamelCase : List[int]
lowerCamelCase : Optional[List[int]] = None
lowerCamelCase : Optional[List[int]] = None
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = '''train'''
lowerCamelCase : Tuple = '''dev'''
lowerCamelCase : Any = '''test'''
class lowercase :
@staticmethod
def lowercase__ ( _lowercase : Any , _lowercase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : str ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : int=False , _lowercase : Optional[Any]="[CLS]" , _lowercase : Tuple=1 , _lowercase : Optional[Any]="[SEP]" , _lowercase : Tuple=False , _lowercase : Optional[Any]=False , _lowercase : List[Any]=0 , _lowercase : Optional[int]=0 , _lowercase : Optional[Any]=-1_00 , _lowercase : Tuple=0 , _lowercase : Union[str, Any]=True , ):
SCREAMING_SNAKE_CASE__ : Tuple = {label: i for i, label in enumerate(_lowercase )}
SCREAMING_SNAKE_CASE__ : Dict = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , _lowercase , len(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(_lowercase )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ : Tuple = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ : Tuple = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ : str = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ : List[str] = max_seq_length - len(_lowercase )
if pad_on_left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(x ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(x ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(x ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(x ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(x ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : List[Any] = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Optional[int]=False , _lowercase : Split = Split.train , ):
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
SCREAMING_SNAKE_CASE__ : Any = torch.load(_lowercase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
SCREAMING_SNAKE_CASE__ : str = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Any = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , _lowercase : List[str] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowercase :
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = -100
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : List[str]=False , _lowercase : Split = Split.train , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : List[str] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32}, tf.int32) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32, '''token_type_ids''': tf.int32}, tf.int32) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Optional[Any] , _lowercase : Union[str, Any] ):
return self.features[i]
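# Hedged distillation (not part of the original file) of the label-alignment
# rule implemented in convert_examples_to_features above: the first word piece
# keeps the real label id, continuation pieces get pad_token_label_id so the
# loss ignores them.
def align_labels_with_subwords(words, labels, tokenize, label_map, pad_token_label_id=-1_00):
    tokens, label_ids = [], []
    for word, label in zip(words, labels):
        pieces = tokenize(word)
        if pieces:  # some tokenizers return [] for bare whitespace
            tokens.extend(pieces)
            label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(pieces) - 1))
    return tokens, label_ids


assert align_labels_with_subwords(
    ['jacksonville'], ['B-LOC'], lambda w: [w[:4], '##' + w[4:]], {'B-LOC': 0}
) == (['jack', '##sonville'], [0, -1_00])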
| 35 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase )
class lowercase ( _UpperCAmelCase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCamelCase : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowerCamelCase : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
lowerCamelCase : ClassVar[Features] = Features({'''labels''': ClassLabel} )
lowerCamelCase : str = "text"
lowerCamelCase : str = "labels"
def lowercase__ ( self : Optional[int] , _lowercase : Dict ):
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , _lowercase ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
SCREAMING_SNAKE_CASE__ : int = copy.deepcopy(self )
SCREAMING_SNAKE_CASE__ : List[str] = self.label_schema.copy()
SCREAMING_SNAKE_CASE__ : List[Any] = features[self.label_column]
SCREAMING_SNAKE_CASE__ : str = label_schema
return task_template
@property
def lowercase__ ( self : Optional[int] ):
return {
self.text_column: "text",
self.label_column: "labels",
}
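# Hedged usage sketch (label names are illustrative): the template above
# requires the dataset's label column to already be a ClassLabel feature.
from datasets import ClassLabel, Features, Value

example_features = Features(
    {'text': Value('string'), 'labels': ClassLabel(names=['negative', 'positive'])}
)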
| 35 |
import os
def a ( A__ = "matrix.txt" ) -> int:
'''Return the minimal path sum from the top-left to the bottom-right of the grid, moving only right or down (Project Euler 81).'''
with open(os.path.join(os.path.dirname(A__ ) , A__ ) ) as in_file:
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_file.read()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[int(A__ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE__ : Dict = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE__ : Any = len(grid[0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[0 for i in range(A__ )] for j in range(A__ )]
SCREAMING_SNAKE_CASE__ : Tuple = grid[0][0]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[0][i] + dp[0][i - 1]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[i][0] + dp[i - 1][0]
for i in range(1 , A__ ):
for j in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
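# Hedged worked example (not part of the original file): the same DP applied to
# an inline 3x3 grid instead of matrix.txt, with dp[i][j] = grid[i][j] + min(up, left).
def min_path_sum(grid: list) -> int:
    dp = [row[:] for row in grid]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            if i == 0 and j == 0:
                continue
            candidates = []
            if i > 0:
                candidates.append(dp[i - 1][j])
            if j > 0:
                candidates.append(dp[i][j - 1])
            dp[i][j] += min(candidates)
    return dp[-1][-1]


assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1-3-1-1-1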
| 35 | 1 |
import requests
from bs4 import BeautifulSoup
def a ( A__ , A__ ) -> str:
'''Return the "Cited by" text for a publication looked up on Google Scholar.'''
SCREAMING_SNAKE_CASE__ : Dict = BeautifulSoup(requests.get(A__ , params=A__ ).content , '''html.parser''' )
SCREAMING_SNAKE_CASE__ : List[str] = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
SCREAMING_SNAKE_CASE__ : List[str] = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
a_ :Optional[int] = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 20_18,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 35 |
from math import factorial
def a ( A__ = 2_0 ) -> int:
'''Count the lattice paths through an n x n grid: the central binomial coefficient C(2n, n) (Project Euler 15).'''
SCREAMING_SNAKE_CASE__ : Tuple = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
SCREAMING_SNAKE_CASE__ : Dict = n // 2
return int(factorial(A__ ) / (factorial(A__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
a_ :str = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
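# Hedged cross-check (requires Python 3.8+ for math.comb): lattice paths through
# an n x n grid equal the central binomial coefficient C(2n, n).
from math import comb

assert comb(4, 2) == 6  # the 2 x 2 grid has six monotone paths
assert comb(40, 20) == 137_846_528_820  # the n = 20 answer computed above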
| 35 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a_ :Optional[int] = logging.get_logger(__name__)
a_ :Union[str, Any] = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Union[str, Any] = '''marian'''
lowerCamelCase : Tuple = ['''past_key_values''']
lowerCamelCase : Optional[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[Any] , _lowercase : str=5_81_01 , _lowercase : Union[str, Any]=None , _lowercase : Tuple=10_24 , _lowercase : List[Any]=12 , _lowercase : int=40_96 , _lowercase : int=16 , _lowercase : str=12 , _lowercase : List[str]=40_96 , _lowercase : Tuple=16 , _lowercase : List[Any]=0.0 , _lowercase : Any=0.0 , _lowercase : List[Any]=True , _lowercase : Dict=True , _lowercase : Union[str, Any]="gelu" , _lowercase : int=10_24 , _lowercase : Optional[Any]=0.1 , _lowercase : List[Any]=0.0 , _lowercase : Optional[int]=0.0 , _lowercase : str=0.02 , _lowercase : Tuple=5_81_00 , _lowercase : int=False , _lowercase : Any=5_81_00 , _lowercase : Tuple=0 , _lowercase : Tuple=0 , _lowercase : List[Any]=True , **_lowercase : int , ):
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Dict = decoder_vocab_size or vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : int = d_model
SCREAMING_SNAKE_CASE__ : Tuple = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : int = encoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Tuple = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : int = dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_dropout
SCREAMING_SNAKE_CASE__ : List[str] = activation_dropout
SCREAMING_SNAKE_CASE__ : Union[str, Any] = activation_function
SCREAMING_SNAKE_CASE__ : Tuple = init_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : Tuple = use_cache
SCREAMING_SNAKE_CASE__ : str = encoder_layers
SCREAMING_SNAKE_CASE__ : Any = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE__ : str = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
class lowercase ( _UpperCAmelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowercase__ ( self : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ : Any = {0: '''batch'''}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_lowercase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE__ : Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.num_layers
for i in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE__ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE__ : int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowercase__ ( self : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super(_lowercase , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_layers
for i in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE__ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def lowercase__ ( self : Optional[Any] , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE__ : str = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Generate decoder inputs
SCREAMING_SNAKE_CASE__ : str = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE__ : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Dict = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE__ : Optional[int] = dict(**_lowercase , **_lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE__ : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : str = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_lowercase , _lowercase )] , dim=1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.num_layers
SCREAMING_SNAKE_CASE__ : str = min(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = max(_lowercase , _lowercase ) - min_num_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE__ : List[Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_lowercase , _lowercase ):
common_inputs["past_key_values"].append((torch.zeros(_lowercase ), torch.zeros(_lowercase )) )
return common_inputs
def lowercase__ ( self : int , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE__ : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ : Any = seqlen + 2
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE__ : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : Any = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
SCREAMING_SNAKE_CASE__ : List[str] = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(_lowercase )
]
return common_inputs
def lowercase__ ( self : Optional[Any] , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : str = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : str = tokenizer.num_special_tokens_to_add(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowercase )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE__ : List[Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dict(tokenizer(_lowercase , return_tensors=_lowercase ) )
return common_inputs
def lowercase__ ( self : List[str] , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = self._generate_dummy_inputs_for_causal_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
return common_inputs
def lowercase__ ( self : str , _lowercase : Union[str, Any] , _lowercase : Any , _lowercase : Optional[int] , _lowercase : Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super()._flatten_past_key_values_(_lowercase , _lowercase , _lowercase , _lowercase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = super(_lowercase , self )._flatten_past_key_values_(
_lowercase , _lowercase , _lowercase , _lowercase )
@property
def lowercase__ ( self : Dict ):
return 1E-4
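# Hedged illustration (dimensions are made up): each past_key_values entry
# built by the dummy-input generators above has shape
# (batch, num_attention_heads, past_sequence_length, head_dim).
import torch

past_key = torch.zeros(2, 8, 5, 64)
past_value = torch.zeros(2, 8, 5, 64)
assert past_key.shape == (2, 8, 5, 64)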
| 35 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def a ( A__ ) -> str:
'''Return the MD5 fingerprint of an image, used for compact comparisons in tests.'''
SCREAMING_SNAKE_CASE__ : Optional[int] = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : List[str] = DepthEstimationPipeline(model=_lowercase , image_processor=_lowercase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _lowercase )
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , _lowercase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = '''Intel/dpt-large'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''depth-estimation''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
SCREAMING_SNAKE_CASE__ : List[str] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# It is highly irregular to have no small tests.
self.skipTest('''There is no hf-internal-testing tiny model for either GLPN or DPT''' )
| 35 | 1 |
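For context, a minimal usage sketch matching the slow test above; the checkpoint and image URL are taken from the test itself and require network access.

from transformers import pipeline

# Build the pipeline and run it on the COCO image used in the slow test.
depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# outputs carries a torch.Tensor under "predicted_depth" and a PIL image
# under "depth".
print(outputs["predicted_depth"].shape)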
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def a ( A__ ) -> int:
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
a_ :Optional[int] = '\ntransformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.add_parser(
'''convert''' , help='''CLI tool to convert a model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=_lowercase , required=_lowercase , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=_lowercase , required=_lowercase , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=_lowercase , required=_lowercase , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=_lowercase , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=_lowercase , default=_lowercase , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=_lowercase )
def __init__( self : Any , _lowercase : str , _lowercase : str , _lowercase : str , _lowercase : str , _lowercase : str , *_lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(f"""Loading model {model_type}""" )
SCREAMING_SNAKE_CASE__ : Dict = model_type
SCREAMING_SNAKE_CASE__ : Any = tf_checkpoint
SCREAMING_SNAKE_CASE__ : List[Any] = pytorch_dump_output
SCREAMING_SNAKE_CASE__ : Union[str, Any] = config
SCREAMING_SNAKE_CASE__ : int = finetuning_task_name
def lowercase__ ( self : Any ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
if "ckpt" in self._tf_checkpoint.lower():
SCREAMING_SNAKE_CASE__ : int = self._tf_checkpoint
SCREAMING_SNAKE_CASE__ : Optional[int] = ''''''
else:
SCREAMING_SNAKE_CASE__ : int = self._tf_checkpoint
SCREAMING_SNAKE_CASE__ : List[Any] = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
_lowercase , self._config , self._pytorch_dump_output , _lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [albert, bert, funnel, t5, gpt, gpt2, transfo_xl, xlnet, xlm, lxmert, rembert]''' )
| 35 |
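A hypothetical invocation of the command defined above; the checkpoint paths are placeholders, and the conversion is driven through the factory function (`a` above, `convert_command_factory` in the unobfuscated transformers source).

from argparse import Namespace

# Placeholder arguments mirroring the CLI flags registered above.
args = Namespace(
    model_type="bert",
    tf_checkpoint="/path/to/bert_model.ckpt",
    config="/path/to/bert_config.json",
    pytorch_dump_output="/path/to/pytorch_model.bin",
    finetuning_task_name=None,
)
# The factory builds the command object; executing it requires TensorFlow.
# Shell equivalent:
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint /path/to/bert_model.ckpt \
#       --config /path/to/bert_config.json \
#       --pytorch_dump_output /path/to/pytorch_model.bin
command = a(args)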
def a ( A__ ) -> int:
'''simple docstring'''
if A__ < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(A__ , float ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(A__ ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
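A self-contained restatement of the helper above with a worked example: bin(25) is '0b11001', which contains three set bits.

def count_set_bits(number: int) -> int:
    # Mirrors the function above, with a descriptive name.
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    if isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count("1")

assert count_set_bits(25) == 3   # 0b11001
assert count_set_bits(0b1111) == 4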
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
a_ :Any = '<<<<<<< This should probably be modified because it mentions: '
a_ :int = '=======\n>>>>>>>\n'
a_ :Dict = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
a_ :int = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def a ( A__ ) -> List[Any]:
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=_lowercase , required=_lowercase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=_lowercase , required=_lowercase , help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_lowercase )
def __init__( self : str , _lowercase : str , _lowercase : str , *_lowercase : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_logger('''datasets-cli/converting''' )
SCREAMING_SNAKE_CASE__ : Tuple = tfds_path
SCREAMING_SNAKE_CASE__ : List[Any] = datasets_directory
def lowercase__ ( self : Union[str, Any] ):
if os.path.isdir(self._tfds_path ):
SCREAMING_SNAKE_CASE__ : Any = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
SCREAMING_SNAKE_CASE__ : int = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
SCREAMING_SNAKE_CASE__ : Tuple = os.path.abspath(self._datasets_directory )
self._logger.info(f"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : str = {}
if os.path.isdir(self._tfds_path ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.listdir(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : int = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"""Looking at file {f_name}""" )
SCREAMING_SNAKE_CASE__ : int = os.path.join(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : int = os.path.join(_lowercase , _lowercase )
if not os.path.isfile(_lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_lowercase , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = f.readlines()
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for line in lines:
SCREAMING_SNAKE_CASE__ : Optional[Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
SCREAMING_SNAKE_CASE__ : Any = ''''''
continue
elif "from absl import logging" in out_line:
SCREAMING_SNAKE_CASE__ : Dict = '''from datasets import logging\n'''
elif "getLogger" in out_line:
SCREAMING_SNAKE_CASE__ : Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(filter(lambda _lowercase : e in out_line , _lowercase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowercase ) + '''\n''' )
out_lines.append(_lowercase )
out_lines.append(_lowercase )
continue
else:
for pattern, replacement in TO_CONVERT:
SCREAMING_SNAKE_CASE__ : Optional[Any] = re.sub(_lowercase , _lowercase , _lowercase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
SCREAMING_SNAKE_CASE__ : str = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _lowercase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
SCREAMING_SNAKE_CASE__ : List[Any] = True
out_lines.append(_lowercase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
SCREAMING_SNAKE_CASE__ : List[Any] = f_name.replace('''.py''' , '''''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(_lowercase , _lowercase )
os.makedirs(_lowercase , exist_ok=_lowercase )
self._logger.info(f"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowercase )
if needs_manual_update:
with_manual_update.append(_lowercase )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(_lowercase )
self._logger.info(f"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
SCREAMING_SNAKE_CASE__ : Any = os.path.basename(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(f"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(_lowercase , _lowercase )
except KeyError:
self._logger.error(f"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 35 |
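To illustrate the rewrite pass above, here is a standalone run of a small subset of the TO_CONVERT patterns over a single tfds line:

import re

# Order matters, as noted in the converter: the specific Text() pattern must
# fire before the generic `tfds.` -> `datasets.` fallback.
patterns = [
    (r"tfds\.core", r"datasets"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.", r"datasets."),
]
line = "text = tfds.features.Text()"
for pattern, replacement in patterns:
    line = re.sub(pattern, replacement, line)
print(line)  # text = datasets.Value('string')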
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def a ( A__ , A__ , A__ , A__ ) -> Tuple[int, int]:
'''simple docstring'''
def constraint_to_multiple_of(A__ , A__ , A__=0 , A__=None ):
SCREAMING_SNAKE_CASE__ : Optional[int] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE__ : Any = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE__ : Any = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (output_size, output_size) if isinstance(A__ , A__ ) else output_size
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = get_image_size(A__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE__ : List[str] = output_height / input_height
SCREAMING_SNAKE_CASE__ : Dict = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE__ : List[str] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE__ : Optional[Any] = scale_height
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_height * input_height , multiple=A__ )
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_width * input_width , multiple=A__ )
return (new_height, new_width)
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
def __init__( self : List[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[str] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(
_lowercase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Tuple , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Any = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Tuple = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Any = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : List[Tuple] = None ):
SCREAMING_SNAKE_CASE__ : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : Tuple = []
for idx in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 35 | 1 |
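A worked instance of the output-size computation above: a 480x640 input with a 384x384 target, keep_aspect_ratio=True and ensure_multiple_of=32 picks the rescale factor closest to 1 and snaps both sides to a multiple of 32.

# Standalone arithmetic mirroring get_resize_output_image_size above.
input_height, input_width = 480, 640
output_height = output_width = 384
multiple = 32

scale_height = output_height / input_height  # 0.8
scale_width = output_width / input_width     # 0.6
# "Scale as little as possible": keep whichever factor is closest to 1.
scale = scale_width if abs(1 - scale_width) < abs(1 - scale_height) else scale_height

new_height = round(scale * input_height / multiple) * multiple
new_width = round(scale * input_width / multiple) * multiple
print((new_height, new_width))  # (384, 512)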
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a_ :Optional[Any] = logging.get_logger(__name__)
a_ :Optional[Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
a_ :Tuple = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
a_ :Tuple = {
'facebook/blenderbot_small-90M': 5_12,
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : str = VOCAB_FILES_NAMES
lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = BlenderbotSmallTokenizer
def __init__( self : List[Any] , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=None , _lowercase : List[Any]="<|endoftext|>" , _lowercase : List[Any]="<|endoftext|>" , _lowercase : Any="<|endoftext|>" , _lowercase : int=False , _lowercase : Optional[int]=True , **_lowercase : List[Any] , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=_lowercase , merges=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase , ) , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : List[str] = add_prefix_space
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[int]=None ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE__ : Tuple = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 35 |
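A small illustration of the special-token layout built above, using placeholder ids (bos=0, eos=2) rather than a real vocabulary:

bos_token_id, eos_token_id = 0, 2  # placeholder ids
token_ids_a = [10, 11, 12]
token_ids_b = [20, 21]

single = [bos_token_id] + token_ids_a + [eos_token_id]
pair = single + [eos_token_id] + token_ids_b + [eos_token_id]
print(single)  # [0, 10, 11, 12, 2]
print(pair)    # [0, 10, 11, 12, 2, 2, 20, 21, 2]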
from __future__ import annotations
from typing import Any
class lowercase :
def __init__( self : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : List[str] = num_of_nodes
SCREAMING_SNAKE_CASE__ : list[list[int]] = []
SCREAMING_SNAKE_CASE__ : dict[int, int] = {}
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Optional[int] , _lowercase : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[Any] , _lowercase : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
SCREAMING_SNAKE_CASE__ : Any = self.find_component(_lowercase )
def lowercase__ ( self : int , _lowercase : list[int] , _lowercase : int , _lowercase : int ):
if component_size[u_node] <= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : Dict = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowercase )
elif component_size[u_node] >= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.find_component(_lowercase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE__ : List[str] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = edge
SCREAMING_SNAKE_CASE__ : Tuple = self.m_component[u]
SCREAMING_SNAKE_CASE__ : List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE__ : int = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = edge
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[u]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowercase , _lowercase , _lowercase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
SCREAMING_SNAKE_CASE__ : List[Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
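Since all methods of the class above share the obfuscated name lowercase__, here is a compact, self-contained sketch of the same Borůvka procedure with explicit names; it assumes a connected graph.

def boruvka_mst(num_nodes: int, edges: list[tuple[int, int, int]]) -> int:
    parent = list(range(num_nodes))

    def find(u: int) -> int:
        while parent[u] != u:
            u = parent[u]
        return u

    num_components, mst_weight = num_nodes, 0
    while num_components > 1:
        cheapest = [-1] * num_nodes  # minimum-weight edge per component
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                for root in (ru, rv):
                    if cheapest[root] == -1 or cheapest[root][2] > w:
                        cheapest[root] = (u, v, w)
        for edge in cheapest:
            if edge != -1:
                u, v, w = edge
                ru, rv = find(u), find(v)
                if ru != rv:  # skip edges that became internal this round
                    parent[rv] = ru
                    mst_weight += w
                    num_components -= 1
    return mst_weight

print(boruvka_mst(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)]))  # 6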
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
a_ :Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( _UpperCAmelCase ):
def __init__( self : Dict , _lowercase : AutoencoderKL , _lowercase : CLIPTextModel , _lowercase : CLIPTokenizer , _lowercase : UNetaDConditionModel , _lowercase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _lowercase : StableDiffusionSafetyChecker , _lowercase : CLIPImageProcessor , ):
super().__init__()
self.register_modules(
vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , )
def lowercase__ ( self : Tuple , _lowercase : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE__ : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowercase )
def lowercase__ ( self : List[Any] ):
self.enable_attention_slicing(_lowercase )
@torch.no_grad()
def __call__( self : Any , _lowercase : Union[str, List[str]] , _lowercase : int = 5_12 , _lowercase : int = 5_12 , _lowercase : int = 50 , _lowercase : float = 7.5 , _lowercase : Optional[Union[str, List[str]]] = None , _lowercase : Optional[int] = 1 , _lowercase : float = 0.0 , _lowercase : Optional[torch.Generator] = None , _lowercase : Optional[torch.FloatTensor] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , _lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowercase : int = 1 , _lowercase : Optional[torch.FloatTensor] = None , **_lowercase : Dict , ):
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : str = 1
elif isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_lowercase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_lowercase )}.""" )
# get prompt text embeddings
SCREAMING_SNAKE_CASE__ : Any = self.tokenizer(
_lowercase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
SCREAMING_SNAKE_CASE__ : int = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = text_embeddings.shape
SCREAMING_SNAKE_CASE__ : Any = text_embeddings.repeat(1 , _lowercase , 1 )
SCREAMING_SNAKE_CASE__ : Tuple = text_embeddings.view(bs_embed * num_images_per_prompt , _lowercase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE__ : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : List[str]
if negative_prompt is None:
SCREAMING_SNAKE_CASE__ : Any = ['''''']
elif type(_lowercase ) is not type(_lowercase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(_lowercase )} !="""
f""" {type(_lowercase )}.""" )
elif isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] = [negative_prompt]
elif batch_size != len(_lowercase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(_lowercase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
SCREAMING_SNAKE_CASE__ : Any = negative_prompt
SCREAMING_SNAKE_CASE__ : Tuple = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = self.tokenizer(
_lowercase , padding='''max_length''' , max_length=_lowercase , truncation=_lowercase , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE__ : int = uncond_embeddings.shape[1]
SCREAMING_SNAKE_CASE__ : List[Any] = uncond_embeddings.repeat(_lowercase , _lowercase , 1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = uncond_embeddings.view(batch_size * num_images_per_prompt , _lowercase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE__ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
SCREAMING_SNAKE_CASE__ : Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
SCREAMING_SNAKE_CASE__ : str = torch.randn(
_lowercase , generator=_lowercase , device='''cpu''' , dtype=_lowercase ).to(self.device )
SCREAMING_SNAKE_CASE__ : List[str] = torch.randn(_lowercase , generator=_lowercase , device='''cpu''' , dtype=_lowercase ).to(
self.device )
else:
SCREAMING_SNAKE_CASE__ : str = torch.randn(
_lowercase , generator=_lowercase , device=self.device , dtype=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.randn(_lowercase , generator=_lowercase , device=self.device , dtype=_lowercase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = latents_reference.to(self.device )
SCREAMING_SNAKE_CASE__ : List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
SCREAMING_SNAKE_CASE__ : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
SCREAMING_SNAKE_CASE__ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
SCREAMING_SNAKE_CASE__ : Optional[Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
SCREAMING_SNAKE_CASE__ : str = 0 if dx < 0 else dx
SCREAMING_SNAKE_CASE__ : int = 0 if dy < 0 else dy
SCREAMING_SNAKE_CASE__ : Any = max(-dx , 0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
SCREAMING_SNAKE_CASE__ : List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(_lowercase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE__ : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE__ : Optional[int] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE__ : Any = {}
if accepts_eta:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = eta
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE__ : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE__ : str = self.scheduler.scale_model_input(_lowercase , _lowercase )
# predict the noise residual
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.unet(_lowercase , _lowercase , encoder_hidden_states=_lowercase ).sample
# perform guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowercase , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Dict = 1 / 0.18215 * latents
SCREAMING_SNAKE_CASE__ : Dict = self.vae.decode(_lowercase ).sample
SCREAMING_SNAKE_CASE__ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE__ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
SCREAMING_SNAKE_CASE__ : Any = self.feature_extractor(self.numpy_to_pil(_lowercase ) , return_tensors='''pt''' ).to(
self.device )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.safety_checker(
images=_lowercase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if output_type == "pil":
SCREAMING_SNAKE_CASE__ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=_lowercase , nsfw_content_detected=_lowercase )
| 35 |
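The classifier-free guidance step above in isolation: with weight w = guidance_scale, the final prediction extrapolates away from the unconditional branch, matching equation (2) of the Imagen paper cited in the comments.

import torch

guidance_scale = 7.5  # illustrative value
noise_pred_uncond = torch.zeros(1, 4, 8, 8)
noise_pred_text = torch.ones(1, 4, 8, 8)

# uncond + w * (text - uncond); w = 1 recovers plain conditional sampling.
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert torch.allclose(noise_pred, torch.full((1, 4, 8, 8), guidance_scale))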
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ :Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 1 |
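A generic sketch of the indirection _LazyModule provides: attribute access triggers the real import. The LazyAttr class below is hypothetical and not the datasets/transformers implementation.

import importlib

class LazyAttr:
    """Defer importing a module until its attribute is first used."""

    def __init__(self, module_name: str, attr: str):
        self._module_name, self._attr = module_name, attr
        self._obj = None

    def get(self):
        if self._obj is None:  # import happens here, on first access
            module = importlib.import_module(self._module_name)
            self._obj = getattr(module, self._attr)
        return self._obj

json_dumps = LazyAttr("json", "dumps")
print(json_dumps.get()({"lazy": True}))  # {"lazy": true}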
import sys
def a ( A__ ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = len(A__ )
SCREAMING_SNAKE_CASE__ : Tuple = [[0 for x in range(A__ )] for x in range(A__ )]
SCREAMING_SNAKE_CASE__ : str = [[0 for x in range(A__ )] for x in range(A__ )]
for chain_length in range(2 , A__ ):
for a in range(1 , n - chain_length + 1 ):
SCREAMING_SNAKE_CASE__ : List[str] = a + chain_length - 1
SCREAMING_SNAKE_CASE__ : Any = sys.maxsize
for c in range(A__ , A__ ):
SCREAMING_SNAKE_CASE__ : Dict = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
SCREAMING_SNAKE_CASE__ : Optional[int] = cost
SCREAMING_SNAKE_CASE__ : int = c
return matrix, sol
def a ( A__ , A__ , A__ ) -> List[str]:
'''simple docstring'''
if i == j:
print('''A''' + str(A__ ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optiomal_solution(A__ , A__ , optimal_solution[i][j] )
print_optiomal_solution(A__ , optimal_solution[i][j] + 1 , A__ )
print(''')''' , end=''' ''' )
def a ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
SCREAMING_SNAKE_CASE__ : List[str] = len(A__ )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = matrix_chain_order(A__ )
print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
print_optiomal_solution(A__ , 1 , n - 1 )
if __name__ == "__main__":
main()
| 35 |
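A cross-check of the example driven by main() above: for dimensions 30x35, 35x15, 15x5, 5x10, 10x20, 20x25 the textbook answer (CLRS §15.2) is 15125 scalar multiplications, parenthesized as ((A1(A2A3))((A4A5)A6)).

def mcm_cost(dims: list[int]) -> int:
    # Same recurrence as matrix_chain_order above, cost table only.
    n = len(dims)
    m = [[0] * n for _ in range(n)]
    for length in range(2, n):
        for i in range(1, n - length + 1):
            j = i + length - 1
            m[i][j] = min(
                m[i][k] + m[k + 1][j] + dims[i - 1] * dims[k] * dims[j]
                for k in range(i, j)
            )
    return m[1][n - 1]

assert mcm_cost([30, 35, 15, 5, 10, 20, 25]) == 15125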
def a ( A__ ) -> str:
'''simple docstring'''
return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] )
def a ( A__ ) -> bytes:
'''simple docstring'''
if (len(A__ ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(A__ ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(A__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
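A round-trip check of the two helpers above, inlined so it runs standalone: b'Hello' encodes to '48656C6C6F' under the uppercase alphabet of RFC 3548 section 6.

data = b"Hello"

# Encode: two uppercase hex digits per byte, as in the first helper.
encoded = "".join(f"{byte:02X}" for byte in data)
print(encoded)  # 48656C6C6F

# Decode: consume the string two hex digits at a time, as in the second.
decoded = bytes(int(encoded[i : i + 2], 16) for i in range(0, len(encoded), 2))
assert decoded == data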
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ :Any = logging.get_logger(__name__)
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = ['''input_features''']
def __init__( self : List[Any] , _lowercase : Tuple=80 , _lowercase : Optional[Any]=1_60_00 , _lowercase : int=1_60 , _lowercase : Dict=30 , _lowercase : str=4_00 , _lowercase : Tuple=0.0 , _lowercase : Dict=False , **_lowercase : Tuple , ):
super().__init__(
feature_size=_lowercase , sampling_rate=_lowercase , padding_value=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_fft
SCREAMING_SNAKE_CASE__ : Optional[int] = hop_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = chunk_length
SCREAMING_SNAKE_CASE__ : Union[str, Any] = chunk_length * sampling_rate
SCREAMING_SNAKE_CASE__ : List[Any] = self.n_samples // hop_length
SCREAMING_SNAKE_CASE__ : int = sampling_rate
SCREAMING_SNAKE_CASE__ : Optional[int] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_lowercase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=_lowercase , norm='''slaney''' , mel_scale='''slaney''' , )
def lowercase__ ( self : int , _lowercase : np.array ):
SCREAMING_SNAKE_CASE__ : Tuple = spectrogram(
_lowercase , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
SCREAMING_SNAKE_CASE__ : Optional[int] = log_spec[:, :-1]
SCREAMING_SNAKE_CASE__ : Any = np.maximum(_lowercase , log_spec.max() - 8.0 )
SCREAMING_SNAKE_CASE__ : List[Any] = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowercase__ ( _lowercase : List[np.ndarray] , _lowercase : List[np.ndarray] , _lowercase : float = 0.0 ):
if attention_mask is not None:
SCREAMING_SNAKE_CASE__ : Any = np.array(_lowercase , np.intaa )
SCREAMING_SNAKE_CASE__ : Tuple = []
for vector, length in zip(_lowercase , attention_mask.sum(-1 ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
SCREAMING_SNAKE_CASE__ : Tuple = padding_value
normed_input_values.append(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Union[str, Any] , _lowercase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _lowercase : bool = True , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[str] = "max_length" , _lowercase : Optional[int] = None , _lowercase : Optional[int] = None , _lowercase : Optional[bool] = None , **_lowercase : Union[str, Any] , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE__ : Any = isinstance(_lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
SCREAMING_SNAKE_CASE__ : List[Any] = is_batched_numpy or (
isinstance(_lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE__ : Any = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_lowercase , np.ndarray ):
SCREAMING_SNAKE_CASE__ : str = np.asarray(_lowercase , dtype=np.floataa )
elif isinstance(_lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE__ : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE__ : List[Any] = [np.asarray([raw_speech] ).T]
SCREAMING_SNAKE_CASE__ : Any = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
SCREAMING_SNAKE_CASE__ : Any = self.pad(
_lowercase , padding=_lowercase , max_length=max_length if max_length else self.n_samples , truncation=_lowercase , pad_to_multiple_of=_lowercase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
SCREAMING_SNAKE_CASE__ : Dict = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
SCREAMING_SNAKE_CASE__ : Dict = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
SCREAMING_SNAKE_CASE__ : int = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
SCREAMING_SNAKE_CASE__ : int = [self._np_extract_fbank_features(_lowercase ) for waveform in input_features[0]]
if isinstance(input_features[0] , _lowercase ):
SCREAMING_SNAKE_CASE__ : int = [np.asarray(_lowercase , dtype=np.floataa ) for feature in input_features]
else:
SCREAMING_SNAKE_CASE__ : Any = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
SCREAMING_SNAKE_CASE__ : Optional[int] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = padded_inputs.convert_to_tensors(_lowercase )
return padded_inputs
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Dict = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 35 |
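The arithmetic behind the defaults above: 30 s of 16 kHz audio is 480,000 samples; with a hop length of 160 that yields 3,000 mel frames, which is why the attention mask is subsampled with [:, ::hop_length].

sampling_rate, chunk_length, hop_length = 16_000, 30, 160

n_samples = chunk_length * sampling_rate  # samples per 30 s chunk
nb_max_frames = n_samples // hop_length   # mel frames per chunk
print(n_samples, nb_max_frames)           # 480000 3000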
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase : Any = ['''accelerate''', '''launch''']
lowerCamelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase : Optional[int] = '''default_config.yaml'''
lowerCamelCase : Optional[Any] = config_folder / config_file
lowerCamelCase : Optional[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase : Optional[Any] = Path('''tests/test_configs''' )
@classmethod
def lowercase__ ( cls : Any ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Tuple ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
lowerCamelCase : str = '''test-tpu'''
lowerCamelCase : Tuple = '''us-central1-a'''
lowerCamelCase : Optional[int] = '''ls'''
lowerCamelCase : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase : Tuple = '''cd /usr/share'''
lowerCamelCase : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase : Any = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
| 35 | 1 |
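Illustrative shell-outs mirroring the commands the tests above assemble; the config path and train.py are placeholders.

import os
import subprocess

env = os.environ.copy()
# `accelerate test` runs the bundled sanity-check script.
subprocess.run(["accelerate", "test"], env=env, check=True)
# `accelerate launch` starts a training script under the given config.
subprocess.run(
    ["accelerate", "launch", "--config_file", "default_config.yaml", "train.py"],
    env=env,
    check=True,
)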
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
a_ :List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
a_ :Tuple = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
a_ :str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ :List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a_ :Optional[int] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ :Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n is provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
f"""There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
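        # rank passages by their relevance logit so spans are extracted from the most relevant documents first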
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
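        # score every candidate span: each (start, end) pair within max_answer_length
        # is scored as start_logit + end_logit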
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda x : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
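        # greedily keep the best-scoring spans, skipping any span that overlaps one already chosen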
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
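# A minimal usage sketch for the reader tokenizer above (hedged: it assumes the
# standard `transformers` DPR API; the checkpoint name and inputs are illustrative):
#
#   from transformers import DPRReader, DPRReaderTokenizer
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="What is love?",
#       titles="Haddaway",
#       texts="'What Is Love' is a song recorded by the artist Haddaway",
#       return_tensors="pt",
#   )
#   outputs = model(**encoded)
#   best_spans = tokenizer.decode_best_spans(encoded, outputs)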
| 35 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ :List[str] = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
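# With the lazy module in place, importing from this package stays cheap:
# `from transformers.models.groupvit import GroupViTModel` only loads the heavy
# torch (or TF) submodule on first attribute access, while the TYPE_CHECKING
# branch above keeps static type checkers and IDEs fully informed.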
| 35 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[str] = 10
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : Any = [1, 2, 3, 4]
SCREAMING_SNAKE_CASE__ : Any = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_lowercase , self.block_size , 0 ) , _lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
SCREAMING_SNAKE_CASE__ : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_lowercase , self.block_size , 0 ) , _lowercase )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
SCREAMING_SNAKE_CASE__ : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_lowercase , self.block_size , 0 ) , _lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Optional[int] = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = process_story(_lowercase )
self.assertEqual(_lowercase , [] )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Tuple = ''''''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = process_story(_lowercase )
self.assertEqual(_lowercase , [] )
self.assertEqual(_lowercase , [] )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = process_story(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''It was the best of times.''']
self.assertEqual(_lowercase , _lowercase )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([1, 2, 3, 4] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_lowercase , 0 ).numpy() , expected.numpy() )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_lowercase , 23 ).numpy() , expected.numpy() )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
SCREAMING_SNAKE_CASE__ : int = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_lowercase , 1 ).numpy() , expected.numpy() )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1_01
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
SCREAMING_SNAKE_CASE__ : int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
SCREAMING_SNAKE_CASE__ : Optional[int] = compute_token_type_ids(_lowercase , _lowercase )
np.testing.assert_array_equal(_lowercase , _lowercase )
| 35 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Optional[int] ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._create_example_records()
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(_lowercase ):
self.assertDictEqual(_lowercase , example_records[i] )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Optional[int] = Dataset.from_list(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : List[Any] ): # checks what happens with missing columns
SCREAMING_SNAKE_CASE__ : List[str] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_lowercase )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def lowercase__ ( self : int ): # checks if the type can be inferred from the second record
SCREAMING_SNAKE_CASE__ : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list([] )
self.assertEqual(len(_lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 35 | 1 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a_ :Dict = logging.get_logger(__name__)
a_ :str = 'T5Config'
def a ( A__ , A__ , A__ ) -> jnp.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.zeros_like(A__ )
SCREAMING_SNAKE_CASE__ : List[Any] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = shifted_input_ids.at[:, 0].set(A__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.where(shifted_input_ids == -1_0_0 , A__ , A__ )
return shifted_input_ids
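# A doctest-style sketch of the shift-right helper above (illustrative values;
# assumes pad_token_id=0, decoder_start_token_id=2, and the usual
# (mask, pad_token_id, shifted_ids) argument order in the jnp.where above):
#
#   >>> a(jnp.array([[-100, 6, 7]]), 0, 2)
#   Array([[2, 0, 6]], dtype=int32)
#
# Label positions marked -100 in the shifted ids are replaced by the pad token.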
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Any = '''mt5'''
lowerCamelCase : List[str] = MTaConfig
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = '''mt5'''
lowerCamelCase : Dict = MTaConfig
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Tuple = '''mt5'''
lowerCamelCase : Optional[int] = MTaConfig
| 35 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : Optional[int] , _lowercase : str=0.2 , _lowercase : str=0.2 ):
SCREAMING_SNAKE_CASE__ : List[Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : List[str] = conva_get[:2]
SCREAMING_SNAKE_CASE__ : str = conva_get[2]
SCREAMING_SNAKE_CASE__ : Any = size_pa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rate_w
SCREAMING_SNAKE_CASE__ : Tuple = rate_t
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE__ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE__ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.num_bpa ) + 1
def lowercase__ ( self : Union[str, Any] , _lowercase : Any ):
# save model dict with pickle
SCREAMING_SNAKE_CASE__ : Dict = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_lowercase , '''wb''' ) as f:
pickle.dump(_lowercase , _lowercase )
print(f"""Model saved: {save_path}""" )
@classmethod
def lowercase__ ( cls : Dict , _lowercase : int ):
# read saved model
with open(_lowercase , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = pickle.load(_lowercase ) # noqa: S301
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''size_pooling1''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''num_bp1''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp2''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp3''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''rate_weight''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''rate_thre''' )
# create model instance
SCREAMING_SNAKE_CASE__ : Dict = CNN(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# modify model parameter
SCREAMING_SNAKE_CASE__ : List[str] = model_dic.get('''w_conv1''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''wkj''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''vji''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''thre_conv1''' )
SCREAMING_SNAKE_CASE__ : Any = model_dic.get('''thre_bp2''' )
SCREAMING_SNAKE_CASE__ : List[Any] = model_dic.get('''thre_bp3''' )
return conv_ins
def lowercase__ ( self : str , _lowercase : Optional[int] ):
return 1 / (1 + np.exp(-1 * x ))
def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] ):
return round(_lowercase , 3 )
def lowercase__ ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ):
# convolution process
SCREAMING_SNAKE_CASE__ : Tuple = convs[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = convs[1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.shape(_lowercase )[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_lowercase )
        # calculate the feature map of every single kernel, and save it as a list of matrices
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(_lowercase ).reshape(
_lowercase , _lowercase )
data_featuremap.append(_lowercase )
        # expand the data slices to one dimension
SCREAMING_SNAKE_CASE__ : int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asarray(_lowercase )
return focus_list, data_featuremap
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any]="average_pool" ):
# pooling process
SCREAMING_SNAKE_CASE__ : List[str] = len(featuremaps[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = int(size_map / size_pooling )
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_map in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Any = featuremaps[i_map]
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(0 , _lowercase , _lowercase ):
for j_focus in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asmatrix(_lowercase ).reshape(_lowercase , _lowercase )
featuremap_pooled.append(_lowercase )
return featuremap_pooled
def lowercase__ ( self : Optional[Any] , _lowercase : Optional[Any] ):
        # expand three-dimensional data into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Dict = []
for i in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.shape(data[i] )
SCREAMING_SNAKE_CASE__ : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE__ : Dict = data_listed.getA().tolist()[0]
data_expanded.extend(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(_lowercase )
return data_expanded
def lowercase__ ( self : Tuple , _lowercase : Optional[int] ):
        # expand a matrix into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.asarray(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : str = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowercase__ ( self : List[str] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Dict = 0
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : Any = np.ones((size_map, size_map) )
for i in range(0 , _lowercase , _lowercase ):
for j in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE__ : Dict = i_pool + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.multiply(
_lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowercase )
return pd_all
def lowercase__ ( self : List[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Tuple , _lowercase : int=bool ):
        # model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_lowercase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_lowercase )) )
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(_lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE__ : Any = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE__ : str = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : int = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Any = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sig(_lowercase )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE__ : Tuple = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.multiply(
np.dot(_lowercase , self.wkj ) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(_lowercase , self.vji )
SCREAMING_SNAKE_CASE__ : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE__ : List[str] = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._calculate_gradient_from_pool(
_lowercase , _lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE__ : Dict = self.rate_weight * np.dot(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layer
SCREAMING_SNAKE_CASE__ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
SCREAMING_SNAKE_CASE__ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = rp + 1
SCREAMING_SNAKE_CASE__ : List[str] = error_count / patterns
all_mse.append(_lowercase )
def draw_error():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowercase , '''+-''' )
plt.plot(_lowercase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_lowercase , alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def lowercase__ ( self : Union[str, Any] , _lowercase : int ):
# model predict
SCREAMING_SNAKE_CASE__ : Dict = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_lowercase )) )
for p in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Any = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Tuple = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.sig(_lowercase )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE__ : str = [list(map(self.do_round , _lowercase ) ) for each in produce_out]
return np.asarray(_lowercase )
def lowercase__ ( self : Optional[int] , _lowercase : Tuple ):
        # return the image data after the convolution process so it can be inspected
SCREAMING_SNAKE_CASE__ : str = np.asmatrix(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Dict = self.pooling(_lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
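# A hedged usage sketch for the CNN class above (argument and method names follow
# the upstream implementation this file appears to derive from; shapes illustrative):
#
#   cnn = CNN(conv1_get=[7, 4, 2], size_p1=2, bp_num1=441, bp_num2=24, bp_num3=4,
#             rate_w=0.2, rate_t=0.2)
#   cnn.train(patterns, datas_train, datas_teach, n_repeat=100,
#             error_accuracy=0.1, draw_e=True)
#   predictions = cnn.predict(datas_test)
#   cnn.save_model("model.pkl")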
| 35 | 1 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
a_ :List[str] = logging.get_logger(__name__)
class lowercase :
lowerCamelCase : Optional[Any] = None
@experimental
def a ( A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Optional[int]:
'''simple docstring'''
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
A__ , A__ , A__ , A__ , A__ , A__ , A__ )
return _map_with_joblib(A__ , A__ , A__ , A__ , A__ , A__ , A__ )
def a ( A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = num_proc if num_proc <= len(A__ ) else len(A__ )
    SCREAMING_SNAKE_CASE__ : List[str] = [] # We organize the splits ourselves (contiguous splits)
for index in range(A__ ):
SCREAMING_SNAKE_CASE__ : Dict = len(A__ ) // num_proc
SCREAMING_SNAKE_CASE__ : Dict = len(A__ ) % num_proc
SCREAMING_SNAKE_CASE__ : Any = div * index + min(A__ , A__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(A__ ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f"""Error dividing inputs iterable among processes. """
f"""Total number of objects {len(A__ )}, """
f"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
f"""Spawning {num_proc} processes for {len(A__ )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = None, None
if not disable_tqdm:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = (RLock(),), tqdm.set_lock
with Pool(A__ , initargs=A__ , initializer=A__ ) as pool:
SCREAMING_SNAKE_CASE__ : List[Any] = pool.map(A__ , A__ )
logger.info(f"""Finished {num_proc} processes""" )
SCREAMING_SNAKE_CASE__ : Any = [obj for proc_res in mapped for obj in proc_res]
logger.info(f"""Unpacked {len(A__ )} objects""" )
return mapped
def a ( A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Any:
'''simple docstring'''
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=A__ ):
return joblib.Parallel()(
joblib.delayed(A__ )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def a ( A__ ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
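# A hedged usage sketch (this module mirrors the `datasets.parallel` API; the
# "spark" backend additionally requires the `joblibspark` package):
#
#   from datasets.parallel import parallel_backend
#
#   with parallel_backend("spark"):
#       ds = ds.map(tokenize_function, num_proc=4)  # work dispatched through joblib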
| 35 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase :
def __init__( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=99 , _lowercase : Optional[int]=13 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : str=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , _lowercase : Any=32 , _lowercase : int=4 , _lowercase : Dict=4 , _lowercase : Dict=30 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : str = pad_token_id
SCREAMING_SNAKE_CASE__ : str = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : int = decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase__ ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
SCREAMING_SNAKE_CASE__ : int = outputs['''past_key_values''']
        # create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the next tokens to input_ids
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , past_key_values=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Tuple = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase : Any = True
lowerCamelCase : int = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : Tuple ):
pass
| 35 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 35 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = LayoutLMTokenizer
lowerCamelCase : Any = LayoutLMTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : List[Any] = True
def lowercase__ ( self : Optional[int] ):
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Optional[int] , **_lowercase : str ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : str = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ : Any = '''unwanted, running'''
return input_text, output_text
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_lowercase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )
def lowercase__ ( self : str ):
pass
| 35 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict=7 , _lowercase : List[Any]=3 , _lowercase : str=18 , _lowercase : Optional[int]=30 , _lowercase : List[Any]=4_00 , _lowercase : Any=True , _lowercase : Optional[int]=None , _lowercase : int=True , _lowercase : List[str]=None , _lowercase : int=True , _lowercase : List[str]=[0.5, 0.5, 0.5] , _lowercase : Dict=[0.5, 0.5, 0.5] , _lowercase : Union[str, Any]=False , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = size if size is not None else {'''height''': 20, '''width''': 20}
SCREAMING_SNAKE_CASE__ : Optional[int] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Dict = batch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE__ : List[str] = min_resolution
SCREAMING_SNAKE_CASE__ : Dict = max_resolution
SCREAMING_SNAKE_CASE__ : int = do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Tuple = crop_size
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean
SCREAMING_SNAKE_CASE__ : int = image_std
SCREAMING_SNAKE_CASE__ : Optional[int] = do_reduce_labels
def lowercase__ ( self : Tuple ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def a ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : str = Image.open(dataset[0]['''file'''] )
SCREAMING_SNAKE_CASE__ : Dict = Image.open(dataset[1]['''file'''] )
return image, map
def a ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Any = Image.open(ds[0]['''file'''] )
SCREAMING_SNAKE_CASE__ : Dict = Image.open(ds[1]['''file'''] )
SCREAMING_SNAKE_CASE__ : List[str] = Image.open(ds[2]['''file'''] )
SCREAMING_SNAKE_CASE__ : List[str] = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = BeitImageProcessor if is_vision_available() else None
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = BeitImageProcessingTester(self )
@property
def lowercase__ ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowercase , '''size''' ) )
self.assertTrue(hasattr(_lowercase , '''do_center_crop''' ) )
self.assertTrue(hasattr(_lowercase , '''center_crop''' ) )
self.assertTrue(hasattr(_lowercase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowercase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowercase , '''image_std''' ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowercase )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowercase )
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : int ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase__ ( self : Dict ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase__ ( self : Optional[Any] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase__ ( self : Optional[int] ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = []
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched
SCREAMING_SNAKE_CASE__ : str = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test not batched input (PIL images)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ : int = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
# Test batched input (PIL images)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = prepare_semantic_batch_inputs()
SCREAMING_SNAKE_CASE__ : Any = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
def lowercase__ ( self : int ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ : List[str] = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
        image_processing.do_reduce_labels = True  # attribute name assumed; remaps background id 0 to the 255 ignore index
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(_lowercase , _lowercase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
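# A minimal sketch (an assumption mirroring the assertions above) of the label
# remapping performed when `do_reduce_labels` is enabled: the background id 0
# becomes the 255 ignore index and every other class id shifts down by one.
import numpy as np

def _reduce_labels_sketch(labels: np.ndarray) -> np.ndarray:
    reduced = labels.astype(np.int64) - 1 # shift every class id down by one
    reduced[labels == 0] = 255 # former background becomes the ignore index
    reduced[labels == 255] = 255 # an existing ignore index stays untouched
    return reduced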
| 35 |
from __future__ import annotations
def a ( A__ , A__ , A__ ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
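    # A quick demonstration (a sketch; `a` above solves Ohm's law, V = I * R,
    # with arguments in the order voltage, current, resistance; exactly one must be 0):
    print(a(0, 2.0, 3.0)) # -> {'voltage': 6.0}
    print(a(6.0, 0, 3.0)) # -> {'current': 2.0}
    print(a(6.0, 2.0, 0)) # -> {'resistance': 3.0}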
| 35 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a_ :Optional[Any] = 'src/diffusers'
a_ :str = '.'
# This is to make sure the diffusers module imported is the one in the repo.
a_ :List[Any] = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
a_ :List[str] = spec.loader.load_module()
def a ( A__ , A__ ) -> Union[str, Any]:
'''simple docstring'''
return line.startswith(A__ ) or len(A__ ) <= 1 or re.search(r'''^\s*\)(\s*->.*:|:)\s*$''' , A__ ) is not None
def a ( A__ ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = object_name.split('''.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
# First let's find the module where our object lives.
SCREAMING_SNAKE_CASE__ : Tuple = parts[i]
while i < len(A__ ) and not os.path.isfile(os.path.join(A__ , f"""{module}.py""" ) ):
i += 1
if i < len(A__ ):
SCREAMING_SNAKE_CASE__ : int = os.path.join(A__ , parts[i] )
if i >= len(A__ ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(A__ , f"""{module}.py""" ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE__ : Any = f.readlines()
# Now let's find the class / func in the code!
SCREAMING_SNAKE_CASE__ : Any = ''''''
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(A__ ) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(A__ ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
SCREAMING_SNAKE_CASE__ : int = line_index
while line_index < len(A__ ) and _should_continue(lines[line_index] , A__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE__ : List[str] = lines[start_index:line_index]
return "".join(A__ )
a_ :Tuple = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
a_ :Dict = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
a_ :Optional[int] = re.compile(r'<FILL\s+[^>]*>')
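# For reference, a sketch of the marker line `_re_copy_warning` is built to match
# (the object path below is only illustrative), including the optional `Old->New`
# rename suffix that `_re_replace_pattern` parses before the copied code is compared:
#
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock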
def a ( A__ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = code.split('''\n''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while idx < len(A__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(A__ ):
return re.search(r'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def a ( A__ ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = len(get_indent(A__ ) ) > 0
if has_indent:
SCREAMING_SNAKE_CASE__ : Optional[int] = f"""class Bla:\n{code}"""
    SCREAMING_SNAKE_CASE__ : List[str] = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_1_9 , preview=True )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = black.format_str(A__ , mode=A__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = style_docstrings_in_code(A__ )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def a ( A__ , A__=False ) -> Optional[Any]:
'''simple docstring'''
with open(A__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f.readlines()
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(A__ ):
SCREAMING_SNAKE_CASE__ : Dict = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = search.groups()
SCREAMING_SNAKE_CASE__ : Any = find_code_in_diffusers(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_indent(A__ )
SCREAMING_SNAKE_CASE__ : Any = line_index + 1 if indent == theoretical_indent else line_index + 2
SCREAMING_SNAKE_CASE__ : Any = theoretical_indent
SCREAMING_SNAKE_CASE__ : Any = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
SCREAMING_SNAKE_CASE__ : int = True
while line_index < len(A__ ) and should_continue:
line_index += 1
if line_index >= len(A__ ):
break
SCREAMING_SNAKE_CASE__ : List[str] = lines[line_index]
SCREAMING_SNAKE_CASE__ : List[str] = _should_continue(A__ , A__ ) and re.search(f"""^{indent}# End copy""" , A__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE__ : Any = lines[start_index:line_index]
SCREAMING_SNAKE_CASE__ : Optional[int] = ''''''.join(A__ )
# Remove any nested `Copied from` comments to avoid circular copies
SCREAMING_SNAKE_CASE__ : int = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(A__ ) is None]
SCREAMING_SNAKE_CASE__ : List[Any] = '''\n'''.join(A__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(A__ ) > 0:
SCREAMING_SNAKE_CASE__ : int = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
SCREAMING_SNAKE_CASE__ : List[str] = [_re_replace_pattern.search(A__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = pattern.groups()
SCREAMING_SNAKE_CASE__ : str = re.sub(A__ , A__ , A__ )
if option.strip() == "all-casing":
SCREAMING_SNAKE_CASE__ : str = re.sub(obja.lower() , obja.lower() , A__ )
SCREAMING_SNAKE_CASE__ : Tuple = re.sub(obja.upper() , obja.upper() , A__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
SCREAMING_SNAKE_CASE__ : Union[str, Any] = blackify(lines[start_index - 1] + theoretical_code )
SCREAMING_SNAKE_CASE__ : Tuple = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
SCREAMING_SNAKE_CASE__ : List[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:]
SCREAMING_SNAKE_CASE__ : List[str] = start_index + 1
if overwrite and len(A__ ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(A__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(A__ )
return diffs
def a ( A__ = False ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = glob.glob(os.path.join(A__ , '''**/*.py''' ) , recursive=A__ )
SCREAMING_SNAKE_CASE__ : Tuple = []
for filename in all_files:
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_copy_consistent(A__ , A__ )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(A__ ) > 0:
SCREAMING_SNAKE_CASE__ : Dict = '''\n'''.join(A__ )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
a_ :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
a_ :List[Any] = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 35 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
a_ :List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
a_ :Tuple = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
a_ :str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ :List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a_ :Optional[int] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ :Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n is provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
f"""There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda x : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
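if __name__ == "__main__":
    # A hedged usage sketch: this file corresponds to `DPRReaderTokenizer` in
    # `transformers` (fetching the checkpoint below requires network access).
    from transformers import DPRReaderTokenizer

    tokenizer = DPRReaderTokenizer.from_pretrained('''facebook/dpr-reader-single-nq-base''' )
    encoded = tokenizer(
        questions='''What does DPR stand for?''' , titles='''Dense Passage Retrieval''' , texts='''DPR stands for Dense Passage Retrieval.''' , return_tensors='''pt''' , )
    print(encoded['''input_ids'''].shape ) # (n_passages, sequence_length)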
| 35 | 1 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
a_ :Optional[int] = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def a ( A__ ) -> Union[str, Any]:
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
a_ :List[Any] = argparse.ArgumentParser()
parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
a_ :Dict = parser.parse_args()
if args.check_lib:
a_ :List[Any] = importlib.import_module('transformers')
a_ :List[str] = Path(transformers_module.__file__).parent
else:
a_ :Union[str, Any] = Path.cwd() / 'build/lib/transformers'
if not test_custom_files_are_present(transformers_path):
raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 35 |
import random
def a ( A__ ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = num - 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = s // 2
t += 1
for _ in range(5 ):
SCREAMING_SNAKE_CASE__ : int = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pow(A__ , A__ , A__ )
if v != 1:
SCREAMING_SNAKE_CASE__ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE__ : Any = i + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (v**2) % num
return True
def a ( A__ ) -> bool:
'''simple docstring'''
if num < 2:
return False
SCREAMING_SNAKE_CASE__ : Optional[int] = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(A__ )
def a ( A__ = 1_0_2_4 ) -> int:
'''simple docstring'''
while True:
SCREAMING_SNAKE_CASE__ : Any = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(A__ ):
return num
if __name__ == "__main__":
a_ :Dict = generate_large_prime()
    print('Prime number:', num)
    print('is_prime_low_num:', is_prime_low_num(num))
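    # Hedged sanity notes for the helpers above (named `rabin_miller`,
    # `is_prime_low_num` and `generate_large_prime` at their call sites):
    #   is_prime_low_num(9_7)   -> True   (97 is in the low-primes table)
    #   is_prime_low_num(1_0_0) -> False  (100 is divisible by 2 and 5)
    # A composite passes all 5 random Miller-Rabin rounds with probability at
    # most 4 ** -5, so the generated `num` is prime with very high confidence.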
| 35 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
a_ :Dict = logging.get_logger(__name__)
def a ( A__ , A__ , A__ , A__=None , A__=None ) -> Dict:
'''simple docstring'''
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : str = tensor_name.split('''.''' )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = getattr(A__ , A__ )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
SCREAMING_SNAKE_CASE__ : List[str] = new_module
SCREAMING_SNAKE_CASE__ : Any = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
SCREAMING_SNAKE_CASE__ : Dict = tensor_name in module._buffers
SCREAMING_SNAKE_CASE__ : Dict = getattr(A__ , A__ )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : List[str] = False
if is_buffer or not is_bitsandbytes_available():
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : List[Any] = False
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
SCREAMING_SNAKE_CASE__ : Any = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
SCREAMING_SNAKE_CASE__ : str = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
SCREAMING_SNAKE_CASE__ : Dict = old_value.to(A__ )
elif isinstance(A__ , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : int = value.to('''cpu''' )
if value.dtype == torch.inta:
SCREAMING_SNAKE_CASE__ : Optional[int] = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(A__ , device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , A__ ) and fpaa_statistics is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = new_value.T
SCREAMING_SNAKE_CASE__ : int = old_value.__dict__
if is_abit:
SCREAMING_SNAKE_CASE__ : int = bnb.nn.IntaParams(A__ , requires_grad=A__ , **A__ ).to(A__ )
elif is_abit:
SCREAMING_SNAKE_CASE__ : Optional[int] = bnb.nn.Paramsabit(A__ , requires_grad=A__ , **A__ ).to(A__ )
SCREAMING_SNAKE_CASE__ : Dict = new_value
if fpaa_statistics is not None:
setattr(module.weight , '''SCB''' , fpaa_statistics.to(A__ ) )
else:
if value is None:
SCREAMING_SNAKE_CASE__ : Tuple = old_value.to(A__ )
elif isinstance(A__ , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : Tuple = value.to(A__ )
else:
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(A__ , device=A__ )
if is_buffer:
SCREAMING_SNAKE_CASE__ : Dict = new_value
else:
SCREAMING_SNAKE_CASE__ : int = nn.Parameter(A__ , requires_grad=old_value.requires_grad )
SCREAMING_SNAKE_CASE__ : List[Any] = new_value
def a ( A__ , A__=None , A__=None , A__=None , A__=False ) -> Tuple:
'''simple docstring'''
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ : Any = []
current_key_name.append(A__ )
if (isinstance(A__ , nn.Linear ) or isinstance(A__ , A__ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(A__ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(A__ , A__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = module.weight.shape
else:
SCREAMING_SNAKE_CASE__ : Dict = module.in_features
SCREAMING_SNAKE_CASE__ : Union[str, Any] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
SCREAMING_SNAKE_CASE__ : str = bnb.nn.LinearabitLt(
A__ , A__ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
SCREAMING_SNAKE_CASE__ : Dict = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
SCREAMING_SNAKE_CASE__ : str = bnb.nn.Linearabit(
A__ , A__ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
SCREAMING_SNAKE_CASE__ : Optional[int] = True
# Store the module class in case we need to transpose the weight later
SCREAMING_SNAKE_CASE__ : Optional[Any] = type(A__ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(A__ )
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = _replace_with_bnb_linear(
A__ , A__ , A__ , A__ , has_been_replaced=A__ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def a ( A__ , A__=None , A__=None , A__=None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_linear(
A__ , A__ , A__ , A__ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def a ( *A__ , **A__ ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , A__ , )
return replace_with_bnb_linear(*A__ , **A__ )
def a ( *A__ , **A__ ) -> Union[str, Any]:
'''simple docstring'''
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , A__ , )
return set_module_quantized_tensor_to_device(*A__ , **A__ )
def a ( A__ ) -> str:
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Any = deepcopy(A__ )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = find_tied_parameters(A__ )
# For compatibility with Accelerate < 0.18
if isinstance(A__ , A__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ : str = sum(A__ , [] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(A__ ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ : Union[str, Any] = not hasattr(A__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ : Tuple = list(model.named_children() )
SCREAMING_SNAKE_CASE__ : int = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ : str = set(A__ ) - set(A__ )
SCREAMING_SNAKE_CASE__ : Tuple = list(set(A__ ) ) + list(A__ )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ : Tuple = ['''.weight''', '''.bias''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ : Optional[int] = name.replace(A__ , '''''' )
filtered_module_names.append(A__ )
return filtered_module_names
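# A hedged end-to-end sketch: in practice these helpers are driven through
# `from_pretrained` with a quantization config rather than called directly
# (running it requires a CUDA device and the `bitsandbytes` package):
#
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     model = AutoModelForCausalLM.from_pretrained(
#         '''gpt2''' , quantization_config=BitsAndBytesConfig(load_in_8bit=True ) )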
| 35 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a ( A__ ) -> List[Any]:
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def a ( A__ , A__ ) -> Any:
'''simple docstring'''
return (-y * np.log(A__ ) - (1 - y) * np.log(1 - h )).mean()
def a ( A__ , A__ , A__ ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = np.dot(A__ , A__ )
return np.sum(y * scores - np.log(1 + np.exp(A__ ) ) )
def a ( A__ , A__ , A__ , A__=7_0_0_0_0 ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = np.zeros(x.shape[1] )
for iterations in range(A__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : Dict = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : int = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : int = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = cost_function(A__ , A__ )
if iterations % 1_0_0 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
a_ :str = datasets.load_iris()
a_ :Dict = iris.data[:, :2]
a_ :int = (iris.target != 0) * 1
a_ :Dict = 0.1
a_ :str = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing the theta i.e our weights vector
def a ( A__ ) -> int:
'''simple docstring'''
return sigmoid_function(
np.dot(A__ , A__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((a_) , (a_)) :str = (x[:, 0].min(), x[:, 0].max())
((a_) , (a_)) :Tuple = (x[:, 1].min(), x[:, 1].max())
((a_) , (a_)) :Dict = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a_ :Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
a_ :Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
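# A short follow-up sketch (decision threshold assumed at 0.5): turn the fitted
# probabilities from `predict_prob` into hard labels and report training accuracy.
y_pred = (predict_prob(x) >= 0.5).astype(int)
print('training accuracy: ', float((y_pred == y).mean()))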
| 35 | 1 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def a ( A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for part_id in partition_order:
SCREAMING_SNAKE_CASE__ : Tuple = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(A__ ):
expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def a ( ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE__ : str = spark.range(1_0_0 ).repartition(1 )
SCREAMING_SNAKE_CASE__ : str = Spark(A__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=1_6 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 5_0
@require_not_windows
@require_dill_gt_0_3_2
def a ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE__ : str = spark.range(1_0 ).repartition(2 )
SCREAMING_SNAKE_CASE__ : str = [1, 0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = _generate_iterable_examples(A__ , A__ ) # Reverse the partitions.
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(A__ , A__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def a ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE__ : Dict = spark.range(1_0 ).repartition(1 )
SCREAMING_SNAKE_CASE__ : Tuple = SparkExamplesIterable(A__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A__ ):
assert row_id == f"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def a ( ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE__ : Optional[Any] = spark.range(3_0 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
        SCREAMING_SNAKE_CASE__ : int = lambda x : x.reverse()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(A__ , [2, 1, 0] )
SCREAMING_SNAKE_CASE__ : Dict = SparkExamplesIterable(A__ ).shuffle_data_sources(A__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def a ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE__ : List[str] = spark.range(2_0 ).repartition(4 )
# Partitions 0 and 2
SCREAMING_SNAKE_CASE__ : Optional[int] = SparkExamplesIterable(A__ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
SCREAMING_SNAKE_CASE__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(A__ , [0, 2] )
for i, (row_id, row_dict) in enumerate(A__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
SCREAMING_SNAKE_CASE__ : int = SparkExamplesIterable(A__ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
SCREAMING_SNAKE_CASE__ : str = _get_expected_row_ids_and_row_dicts_for_partition_order(A__ , [1, 3] )
for i, (row_id, row_dict) in enumerate(A__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def a ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
SCREAMING_SNAKE_CASE__ : Optional[int] = spark.range(1_0_0 ).repartition(1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = Spark(A__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_0_0
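# Note (a sketch of the sharding contract exercised above): with `num_workers=2`
# the partitions are assigned round-robin, so worker 0 reads partitions {0, 2}
# and worker 1 reads partitions {1, 3}.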
| 35 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a ( A__ ) -> Tuple:
'''simple docstring'''
return EnvironmentCommand()
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = huggingface_hub.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''NA'''
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ : str = '''not installed'''
if is_transformers_available():
import transformers
SCREAMING_SNAKE_CASE__ : Optional[Any] = transformers.__version__
SCREAMING_SNAKE_CASE__ : Any = '''not installed'''
if is_accelerate_available():
import accelerate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerate.__version__
SCREAMING_SNAKE_CASE__ : Tuple = '''not installed'''
if is_xformers_available():
import xformers
SCREAMING_SNAKE_CASE__ : Tuple = xformers.__version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def lowercase__ ( _lowercase : Dict ):
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class lowercase ( unittest.TestCase ):
def __init__( self : Dict , _lowercase : Optional[int] , _lowercase : List[str]=13 , _lowercase : Dict=7 , _lowercase : Any=True , _lowercase : Union[str, Any]=True , _lowercase : int=True , _lowercase : Union[str, Any]=True , _lowercase : List[str]=99 , _lowercase : Optional[Any]=32 , _lowercase : Optional[Any]=5 , _lowercase : Optional[int]=4 , _lowercase : Optional[int]=37 , _lowercase : Tuple="gelu" , _lowercase : Optional[Any]=0.1 , _lowercase : List[str]=0.1 , _lowercase : Tuple=5_12 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=2 , _lowercase : int=0.02 , _lowercase : Union[str, Any]=4 , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent
SCREAMING_SNAKE_CASE__ : Dict = batch_size
SCREAMING_SNAKE_CASE__ : Tuple = seq_length
SCREAMING_SNAKE_CASE__ : str = is_training
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_attention_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_act
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Union[str, Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : str = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[int] = num_choices
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Tuple = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Any = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Dict = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Any = FlaxAlbertModelTester(self )
@slow
def lowercase__ ( self : List[str] ):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class_name.from_pretrained('''albert-base-v2''' )
SCREAMING_SNAKE_CASE__ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class lowercase ( unittest.TestCase ):
@slow
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
SCREAMING_SNAKE_CASE__ : List[str] = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , attention_mask=_lowercase )[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = (1, 11, 7_68)
self.assertEqual(output.shape , _lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1E-4 ) )
| 35 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( A__ , A__ , A__ ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = RemBertConfig.from_json_file(A__ )
print('''Building PyTorch model from configuration: {}'''.format(str(A__ ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = RemBertModel(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(A__ , A__ , A__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(A__ ) )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
a_ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ :Optional[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
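    # Hedged invocation sketch (all paths below are placeholders):
    #   python convert_rembert_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path /path/to/model.ckpt \
    #       --rembert_config_file /path/to/config.json \
    #       --pytorch_dump_path /path/to/pytorch_model.bin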
| 35 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( A__ , A__ , A__ ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = LxmertConfig.from_json_file(A__ )
print(f"""Building PyTorch model from configuration: {config}""" )
SCREAMING_SNAKE_CASE__ : str = LxmertForPreTraining(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(A__ , A__ , A__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
a_ :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ :Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 35 |
from sklearn.metrics import recall_score
import datasets
a_ :int = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
a_ :Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (`\'warn\'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
a_ :Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def lowercase__ ( self : Tuple , predictions : Optional[Any] , references : Optional[Any] , labels : Optional[int]=None , pos_label : Tuple=1 , average : List[Any]="binary" , sample_weight : Any=None , zero_division : Optional[int]="warn" , ):
        SCREAMING_SNAKE_CASE__ : Optional[Any] = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(SCREAMING_SNAKE_CASE__ ) if SCREAMING_SNAKE_CASE__.size == 1 else SCREAMING_SNAKE_CASE__}
| 35 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a ( ) -> Optional[Any]:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def a ( ) -> str:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def a ( ) -> List[str]:
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head('''https://huggingface.co''' )
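# Each OfflineSimulationMode above simulates a different failure: a request that
# would hang indefinitely (CONNECTION_TIMES_OUT), an immediate connection error
# (CONNECTION_FAILS), and the library-level HF_DATASETS_OFFLINE=1 switch, which
# makes `http_head` fail fast without touching the network.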
| 35 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a_ :List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase :
lowerCamelCase : str
lowerCamelCase : List[str]
lowerCamelCase : Optional[List[str]]
@dataclass
class lowercase :
lowerCamelCase : List[int]
lowerCamelCase : List[int]
lowerCamelCase : Optional[List[int]] = None
lowerCamelCase : Optional[List[int]] = None
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = '''train'''
lowerCamelCase : Tuple = '''dev'''
lowerCamelCase : Any = '''test'''
class lowercase :
@staticmethod
def lowercase__ ( _lowercase : Any , _lowercase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : str ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : int=False , _lowercase : Optional[Any]="[CLS]" , _lowercase : Tuple=1 , _lowercase : Optional[Any]="[SEP]" , _lowercase : Tuple=False , _lowercase : Optional[Any]=False , _lowercase : List[Any]=0 , _lowercase : Optional[int]=0 , _lowercase : Optional[Any]=-1_00 , _lowercase : Tuple=0 , _lowercase : Union[str, Any]=True , ):
SCREAMING_SNAKE_CASE__ : Tuple = {label: i for i, label in enumerate(_lowercase )}
SCREAMING_SNAKE_CASE__ : Dict = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , _lowercase , len(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(_lowercase )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
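                    # e.g. "jacksonville" -> ["jack", "##son", "##ville"]: the real label id
                    # goes on "jack" and pad_token_label_id on the two subword pieces.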
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ : Tuple = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ : Tuple = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ : str = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ : List[str] = max_seq_length - len(_lowercase )
if pad_on_left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
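            # e.g. with max_seq_length=8 and 5 real tokens, padding_length is 3, so
            # input_mask ends [1, 1, 1, 1, 1, 0, 0, 0] (or starts with the zeros if pad_on_left).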
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(_lowercase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(_lowercase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(_lowercase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(_lowercase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : List[Any] = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Optional[int]=False , _lowercase : Split = Split.train , ):
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
SCREAMING_SNAKE_CASE__ : Any = torch.load(_lowercase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
SCREAMING_SNAKE_CASE__ : str = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Any = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , _lowercase : List[str] ):
        return self.features[_lowercase]
if is_tf_available():
import tensorflow as tf
class lowercase :
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = -100
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : List[str]=False , _lowercase : Split = Split.train , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : List[str] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
                gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32}, tf.int64) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
                gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32, '''token_type_ids''': tf.int32}, tf.int64) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Optional[Any] , _lowercase : Union[str, Any] ):
        return self.features[_lowercase]
| 35 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ :Tuple = logging.get_logger(__name__)
a_ :int = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[Any] = '''yolos'''
    def __init__( self : Union[str, Any] , hidden_size : List[Any]=7_68 , num_hidden_layers : Dict=12 , num_attention_heads : Tuple=12 , intermediate_size : Tuple=30_72 , hidden_act : Any="gelu" , hidden_dropout_prob : Optional[int]=0.0 , attention_probs_dropout_prob : Tuple=0.0 , initializer_range : str=0.02 , layer_norm_eps : Tuple=1E-12 , image_size : str=[5_12, 8_64] , patch_size : Tuple=16 , num_channels : Optional[Any]=3 , qkv_bias : List[str]=True , num_detection_tokens : Optional[int]=1_00 , use_mid_position_embeddings : Optional[int]=True , auxiliary_loss : Union[str, Any]=False , class_cost : Optional[int]=1 , bbox_cost : Any=5 , giou_cost : Tuple=2 , bbox_loss_coefficient : Optional[int]=5 , giou_loss_coefficient : List[str]=2 , eos_coefficient : str=0.1 , **kwargs : List[str] , ):
        super().__init__(**kwargs )
SCREAMING_SNAKE_CASE__ : Any = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Tuple = image_size
SCREAMING_SNAKE_CASE__ : str = patch_size
SCREAMING_SNAKE_CASE__ : str = num_channels
SCREAMING_SNAKE_CASE__ : Dict = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_detection_tokens
SCREAMING_SNAKE_CASE__ : Tuple = use_mid_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = auxiliary_loss
# Hungarian matcher
SCREAMING_SNAKE_CASE__ : Optional[int] = class_cost
SCREAMING_SNAKE_CASE__ : Optional[int] = bbox_cost
SCREAMING_SNAKE_CASE__ : Union[str, Any] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ : str = bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ : Optional[Any] = giou_loss_coefficient
SCREAMING_SNAKE_CASE__ : Dict = eos_coefficient
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[Any] = version.parse('''1.11''' )
@property
def lowercase__ ( self : Union[str, Any] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowercase__ ( self : Any ):
return 1E-4
@property
def lowercase__ ( self : Tuple ):
return 12
| 35 |
import os
def a ( A__ = "matrix.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(A__ ) , A__ ) ) as in_file:
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_file.read()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[int(A__ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE__ : Dict = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE__ : Any = len(grid[0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[0 for i in range(A__ )] for j in range(A__ )]
SCREAMING_SNAKE_CASE__ : Tuple = grid[0][0]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[0][i] + dp[0][i - 1]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[i][0] + dp[i - 1][0]
for i in range(1 , A__ ):
for j in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
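# Worked example (hypothetical 2x2 grid, not from matrix.txt): for [[1, 2], [3, 4]]
# the dp table is [[1, 3], [4, 7]], so the minimal right/down path sum is 7.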
if __name__ == "__main__":
    print(F'''{a() = }''')
| 35 | 1 |
from PIL import Image
def a ( img , level ) -> Image:
    '''simple docstring'''
    SCREAMING_SNAKE_CASE__ : List[str] = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
    def contrast(c : int ) -> int:
        return int(1_2_8 + SCREAMING_SNAKE_CASE__ * (c - 1_2_8) )
    return img.point(contrast )
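# With level=170 the factor is (259 * 425) / (255 * 89), roughly 4.85, so pixel
# values are pushed hard away from the mid-point 128 toward black or white.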
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        a_ :Tuple = a(img, 1_70)
        a_.save('image_data/lena_high_contrast.png', format='png')
| 35 |
from math import factorial
def a ( A__ = 2_0 ) -> int:
'''simple docstring'''
    n = 2 * A__  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
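# Sanity check: a 2x2 grid has C(4, 2) = 6 lattice paths, and the Project Euler
# default of a 20x20 grid has C(40, 20) = 137846528820.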
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
        print(a(20))
else:
try:
            a_ :str = int(sys.argv[1])
            print(a(a_))
except ValueError:
print('Invalid entry - please enter a number.')
| 35 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ :Optional[int] = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Dict = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
a_ :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def hashimage( A__ ) -> str:
    '''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Optional[int] = hashlib.md5(A__.tobytes() )
    return SCREAMING_SNAKE_CASE__.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def lowercase__ ( self : List[Any] , model : Tuple , tokenizer : Any , processor : List[str] ):
        SCREAMING_SNAKE_CASE__ : List[str] = DepthEstimationPipeline(model=model , image_processor=processor )
        return SCREAMING_SNAKE_CASE__, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def lowercase__ ( self : Union[str, Any] , depth_estimator : int , examples : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _lowercase )
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , _lowercase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = '''Intel/dpt-large'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''depth-estimation''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
SCREAMING_SNAKE_CASE__ : List[str] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 35 | 1 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared( A__ ) -> float:
'''simple docstring'''
return np.dot(A__ , A__ )
class lowercase :
    def __init__( self : List[Any] , *,
        regularization : float = np.inf , kernel : str = "linear" , gamma : float = 0.0 , ):
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
            self.kernel = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
SCREAMING_SNAKE_CASE__ : List[str] = f"""Unknown kernel: {kernel}"""
raise ValueError(_lowercase )
    def __linear( self : Optional[int] , vectora : ndarray , vectorb : ndarray ):
        return np.dot(vectora , vectorb )
    def __rbf( self : Any , vectora : ndarray , vectorb : ndarray ):
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb )) )
    def fit( self : Any , observations : list[ndarray] , classes : ndarray ):
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n , ) = np.shape(classes )
        def to_minimize(candidate : ndarray ) -> float:
            s = 0
            (m , ) = np.shape(candidate )
            for i in range(m ):
                for j in range(m ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j] )
                    )
            return 1 / 2 * s - sum(candidate )
        ly_contraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_contraint] ).x
        self.optimum = l_star
# calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j] )
        self.offset = s / n
    def predict( self : Tuple , observation : ndarray ):
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
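# A minimal usage sketch (synthetic, linearly separable data; labels must be
# +1/-1 for the dual formulation above). The expected prediction is +1:
# >>> xs = [np.array([1.0, 1.0]), np.array([-1.0, -1.0])]
# >>> svc = lowercase(kernel="linear")
# >>> svc.fit(observations=xs, classes=np.array([1, -1]))
# >>> svc.predict(np.array([2.0, 2.0]))
# 1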
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
def a ( A__ ) -> int:
'''simple docstring'''
    if not isinstance(A__ , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    elif A__ < 0:
        raise ValueError('''Input value must be a positive integer''' )
return bin(A__ ).count('''1''' )
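# For example, bin(25) == '0b11001', so an input of 25 returns 3.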
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def get_resize_output_image_size( input_image , output_size , keep_aspect_ratio , multiple ) -> Tuple[int, int]:
    '''simple docstring'''
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
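# For instance, a 480x640 image targeted at (384, 384) with keep_aspect_ratio=True
# and multiple=32 keeps the height scale 0.8 (it deviates less from 1 than the
# width scale 0.6) and returns (384, 512), both multiples of 32.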
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
def __init__( self : List[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[str] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(
_lowercase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Tuple , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Any = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Tuple = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Any = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : List[Tuple] = None ):
SCREAMING_SNAKE_CASE__ : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : Tuple = []
for idx in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 35 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def get_resize_output_image_size( input_image , output_size , keep_aspect_ratio , multiple ) -> Tuple[int, int]:
    '''simple docstring'''
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
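# For instance, a 480x640 image targeted at (384, 384) with keep_aspect_ratio=True
# and multiple=32 keeps the height scale 0.8 (it deviates less from 1 than the
# width scale 0.6) and returns (384, 512), both multiples of 32.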
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
def __init__( self : List[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[str] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(
_lowercase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Tuple , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Any = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Tuple = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Any = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : List[Tuple] = None ):
SCREAMING_SNAKE_CASE__ : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : Tuple = []
for idx in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 35 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 35 |
from __future__ import annotations
from typing import Any
class lowercase :
    def __init__( self : int , num_of_nodes : int ):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges : list[list[int]] = []
        self.m_component : dict[int, int] = {}
    def add_edge( self : Union[str, Any] , u_node : int , v_node : int , weight : int ):
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self : Optional[int] , u_node : int ):
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self : Optional[Any] , u_node : int ):
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self : int , component_size : list[int] , u_node : int , v_node : int ):
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self : str ):
        component_size = []
        mst_weight = 0
        minimum_weight_edge : list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u , v , w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u , v , w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : Any ):
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_lowercase , )
assert hasattr(self , '''env''' )
def lowercase__ ( self : Any , _lowercase : List[str] ):
# configuration for running training on smdistributed Model Parallel
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''enabled''': True,
'''processes_per_host''': 8,
}
SCREAMING_SNAKE_CASE__ : List[str] = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
SCREAMING_SNAKE_CASE__ : int = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
SCREAMING_SNAKE_CASE__ : Optional[int] = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=_lowercase , instance_type=self.instance_type , debugger_hook_config=_lowercase , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=_lowercase , py_version='''py36''' , )
def lowercase__ ( self : Optional[int] , _lowercase : Union[str, Any] ):
TrainingJobAnalytics(_lowercase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowercase__ ( self : str , _lowercase : Optional[Any] ):
# create estimator
SCREAMING_SNAKE_CASE__ : Tuple = self.create_estimator(_lowercase )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE__ : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE__ : Tuple = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE__ : str = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _lowercase )
| 35 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ :Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ :Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 |
def a ( A__ ) -> str:
'''simple docstring'''
return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] )
def a ( A__ ) -> bytes:
'''simple docstring'''
if (len(A__ ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(A__ ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
    return bytes(int(A__[i] + A__[i + 1] , 1_6 ) for i in range(0 , len(A__ ) , 2 ) )
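# Round-trip example: encoding b"HELLO" yields "48454C4C4F", and decoding
# "48454C4C4F" returns b"HELLO".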
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a ( A__ ) -> List[Any]:
'''simple docstring'''
    for param in A__.parameters():
        param.requires_grad = False
def a ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
SCREAMING_SNAKE_CASE__ : List[str] = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
    return SCREAMING_SNAKE_CASE__
def a ( A__ ) -> int:
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : List[Any] = plt.imshow(A__ )
    SCREAMING_SNAKE_CASE__.axes.get_xaxis().set_visible(False )
    SCREAMING_SNAKE_CASE__.axes.get_yaxis().set_visible(False )
plt.show()
def a ( ) -> Optional[Any]:
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : int = datetime.now()
    SCREAMING_SNAKE_CASE__ : Optional[int] = SCREAMING_SNAKE_CASE__.strftime('''%H:%M:%S''' )
    return SCREAMING_SNAKE_CASE__
| 35 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase : Any = ['''accelerate''', '''launch''']
lowerCamelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase : Optional[int] = '''default_config.yaml'''
lowerCamelCase : Optional[Any] = config_folder / config_file
lowerCamelCase : Optional[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase : Optional[Any] = Path('''tests/test_configs''' )
@classmethod
def lowercase__ ( cls : Any ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Tuple ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
lowerCamelCase : str = '''test-tpu'''
lowerCamelCase : Tuple = '''us-central1-a'''
lowerCamelCase : Optional[int] = '''ls'''
lowerCamelCase : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase : Tuple = '''cd /usr/share'''
lowerCamelCase : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase : Any = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
| 35 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ :int = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Optional[int] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[str] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ :List[str] = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
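# the model classes are appended to this import structure below only when the torch / TF
# backends are importable; _LazyModule at the bottom then defers the real imports until
# an attribute is first accessed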
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a_ :List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase :
lowerCamelCase : str
lowerCamelCase : List[str]
lowerCamelCase : Optional[List[str]]
@dataclass
class lowercase :
lowerCamelCase : List[int]
lowerCamelCase : List[int]
lowerCamelCase : Optional[List[int]] = None
lowerCamelCase : Optional[List[int]] = None
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = '''train'''
lowerCamelCase : Tuple = '''dev'''
lowerCamelCase : Any = '''test'''
class lowercase :
@staticmethod
def lowercase__ ( _lowercase : Any , _lowercase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : str ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : int=False , _lowercase : Optional[Any]="[CLS]" , _lowercase : Tuple=1 , _lowercase : Optional[Any]="[SEP]" , _lowercase : Tuple=False , _lowercase : Optional[Any]=False , _lowercase : List[Any]=0 , _lowercase : Optional[int]=0 , _lowercase : Optional[Any]=-1_00 , _lowercase : Tuple=0 , _lowercase : Union[str, Any]=True , ):
SCREAMING_SNAKE_CASE__ : Tuple = {label: i for i, label in enumerate(_lowercase )}
SCREAMING_SNAKE_CASE__ : Dict = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , _lowercase , len(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(_lowercase )
# bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ : Tuple = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ : Tuple = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ : str = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ : List[str] = max_seq_length - len(_lowercase )
if pad_on_left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(_lowercase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(_lowercase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(_lowercase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(_lowercase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : List[Any] = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Optional[int]=False , _lowercase : Split = Split.train , ):
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
SCREAMING_SNAKE_CASE__ : Any = torch.load(_lowercase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
SCREAMING_SNAKE_CASE__ : str = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Any = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , _lowercase : List[str] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowercase :
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = -100
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : List[str]=False , _lowercase : Split = Split.train , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : List[str] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
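# output shapes are declared as TensorShape([None]) so that examples of different lengths
# can still be padded and batched later by tf.data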
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Optional[Any] , _lowercase : Union[str, Any] ):
return self.features[i]
| 35 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Optional[int] ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._create_example_records()
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(_lowercase ):
self.assertDictEqual(_lowercase , example_records[i] )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Optional[int] = Dataset.from_list(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : List[Any] ): # checks what happens with missing columns
SCREAMING_SNAKE_CASE__ : List[str] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_lowercase )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
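# from_list infers the schema from the first record, so "col_2" is dropped entirely and the
# second row's missing "col_1" is filled with None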
def lowercase__ ( self : int ): # checks if the type can be inferred from the second record
SCREAMING_SNAKE_CASE__ : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list([] )
self.assertEqual(len(_lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 35 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a_ :Optional[int] = logging.get_logger(__name__)
a_ :List[Any] = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
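# e.g. the fairseq weight "encoder.layers.3.fc1.weight" is routed to
# "(sew.)encoder.layers.3.feed_forward.intermediate_dense.weight": the "fc1" entry above is
# matched and the "*" is replaced by the layer index extracted from the original name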
def a ( A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(A__ , A__ )
if weight_type is not None:
SCREAMING_SNAKE_CASE__ : str = getattr(A__ , A__ ).shape
else:
SCREAMING_SNAKE_CASE__ : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ : Dict = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ : Dict = value
else:
SCREAMING_SNAKE_CASE__ : Any = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def a ( A__ , A__ , A__ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = []
SCREAMING_SNAKE_CASE__ : int = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ : List[Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == '''group''' , )
SCREAMING_SNAKE_CASE__ : List[str] = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE__ : Optional[int] = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
SCREAMING_SNAKE_CASE__ : int = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ : Optional[Any] = name.split(A__ )[0].split('''.''' )[-2]
SCREAMING_SNAKE_CASE__ : Any = mapped_key.replace('''*''' , A__ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ : List[str] = '''weight_g'''
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ : int = '''weight_v'''
elif "weight" in name:
SCREAMING_SNAKE_CASE__ : Any = '''weight'''
elif "bias" in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''bias'''
else:
SCREAMING_SNAKE_CASE__ : Dict = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def a ( A__ , A__ , A__ , A__ , A__ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = full_name.split('''conv_layers.''' )[-1]
SCREAMING_SNAKE_CASE__ : int = name.split('''.''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(items[0] )
SCREAMING_SNAKE_CASE__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
SCREAMING_SNAKE_CASE__ : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
SCREAMING_SNAKE_CASE__ : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
SCREAMING_SNAKE_CASE__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
SCREAMING_SNAKE_CASE__ : int = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(A__ )
def a ( A__ , A__ ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = SEWConfig()
if is_finetuned:
SCREAMING_SNAKE_CASE__ : Any = model.wav_encoder.wav_model.cfg
else:
SCREAMING_SNAKE_CASE__ : Any = model.cfg
SCREAMING_SNAKE_CASE__ : str = fs_config.conv_bias
SCREAMING_SNAKE_CASE__ : str = eval(fs_config.conv_feature_layers )
SCREAMING_SNAKE_CASE__ : Optional[int] = [x[0] for x in conv_layers]
SCREAMING_SNAKE_CASE__ : int = [x[1] for x in conv_layers]
SCREAMING_SNAKE_CASE__ : Tuple = [x[2] for x in conv_layers]
SCREAMING_SNAKE_CASE__ : int = '''gelu'''
SCREAMING_SNAKE_CASE__ : Any = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0.0
SCREAMING_SNAKE_CASE__ : List[Any] = fs_config.activation_fn.name
SCREAMING_SNAKE_CASE__ : Any = fs_config.encoder_embed_dim
SCREAMING_SNAKE_CASE__ : str = 0.0_2
SCREAMING_SNAKE_CASE__ : Any = fs_config.encoder_ffn_embed_dim
SCREAMING_SNAKE_CASE__ : Optional[int] = 1e-5
SCREAMING_SNAKE_CASE__ : str = fs_config.encoder_layerdrop
SCREAMING_SNAKE_CASE__ : Union[str, Any] = fs_config.encoder_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = fs_config.conv_pos_groups
SCREAMING_SNAKE_CASE__ : List[str] = fs_config.conv_pos
SCREAMING_SNAKE_CASE__ : Optional[int] = len(A__ )
SCREAMING_SNAKE_CASE__ : Tuple = fs_config.encoder_layers
SCREAMING_SNAKE_CASE__ : str = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
SCREAMING_SNAKE_CASE__ : List[str] = model.cfg
SCREAMING_SNAKE_CASE__ : Union[str, Any] = fs_config.final_dropout
SCREAMING_SNAKE_CASE__ : str = fs_config.layerdrop
SCREAMING_SNAKE_CASE__ : str = fs_config.activation_dropout
SCREAMING_SNAKE_CASE__ : List[Any] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
SCREAMING_SNAKE_CASE__ : Optional[int] = fs_config.attention_dropout
SCREAMING_SNAKE_CASE__ : Optional[Any] = fs_config.dropout_input
SCREAMING_SNAKE_CASE__ : List[str] = fs_config.dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = fs_config.mask_channel_length
SCREAMING_SNAKE_CASE__ : Tuple = fs_config.mask_channel_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = fs_config.mask_length
SCREAMING_SNAKE_CASE__ : List[str] = fs_config.mask_prob
SCREAMING_SNAKE_CASE__ : List[Any] = '''Wav2Vec2FeatureExtractor'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def a ( A__ , A__ , A__=None , A__=None , A__=True ) -> Dict:
'''simple docstring'''
if is_finetuned:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = SEWConfig.from_pretrained(A__ )
else:
SCREAMING_SNAKE_CASE__ : List[str] = convert_config(model[0] , A__ )
SCREAMING_SNAKE_CASE__ : List[Any] = model[0].eval()
SCREAMING_SNAKE_CASE__ : Dict = True if config.feat_extract_norm == '''layer''' else False
SCREAMING_SNAKE_CASE__ : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=A__ , return_attention_mask=A__ , )
if is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE__ : List[Any] = Dictionary.load(A__ )
# important: change the bos & pad token ids, since the CTC blank symbol is <pad>
# and not <s> as it is in fairseq
SCREAMING_SNAKE_CASE__ : Any = target_dict.pad_index
SCREAMING_SNAKE_CASE__ : int = target_dict.bos_index
SCREAMING_SNAKE_CASE__ : Optional[int] = target_dict.pad_index
SCREAMING_SNAKE_CASE__ : Optional[Any] = target_dict.bos_index
SCREAMING_SNAKE_CASE__ : str = target_dict.eos_index
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(target_dict.symbols )
SCREAMING_SNAKE_CASE__ : str = os.path.join(A__ , '''vocab.json''' )
if not os.path.isdir(A__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(A__ ) )
return
os.makedirs(A__ , exist_ok=A__ )
with open(A__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , A__ )
SCREAMING_SNAKE_CASE__ : List[Any] = WavaVecaCTCTokenizer(
A__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=A__ , )
SCREAMING_SNAKE_CASE__ : int = WavaVecaProcessor(feature_extractor=A__ , tokenizer=A__ )
processor.save_pretrained(A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SEWForCTC(A__ )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = SEWModel(A__ )
feature_extractor.save_pretrained(A__ )
recursively_load_weights(A__ , A__ , A__ )
hf_model.save_pretrained(A__ )
if __name__ == "__main__":
a_ :Tuple = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
a_ :Union[str, Any] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 35 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : Optional[int] , _lowercase : str=0.2 , _lowercase : str=0.2 ):
SCREAMING_SNAKE_CASE__ : List[Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : List[str] = conva_get[:2]
SCREAMING_SNAKE_CASE__ : str = conva_get[2]
SCREAMING_SNAKE_CASE__ : Any = size_pa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rate_w
SCREAMING_SNAKE_CASE__ : Tuple = rate_t
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE__ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE__ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.num_bpa ) + 1
def lowercase__ ( self : Union[str, Any] , _lowercase : Any ):
# save model dict with pickle
SCREAMING_SNAKE_CASE__ : Dict = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_lowercase , '''wb''' ) as f:
pickle.dump(_lowercase , _lowercase )
print(f"""Model saved: {save_path}""" )
@classmethod
def lowercase__ ( cls : Dict , _lowercase : int ):
# read saved model
with open(_lowercase , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = pickle.load(_lowercase ) # noqa: S301
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''size_pooling1''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''num_bp1''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp2''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp3''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''rate_weight''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''rate_thre''' )
# create model instance
SCREAMING_SNAKE_CASE__ : Dict = CNN(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# modify model parameter
SCREAMING_SNAKE_CASE__ : List[str] = model_dic.get('''w_conv1''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''wkj''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''vji''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''thre_conv1''' )
SCREAMING_SNAKE_CASE__ : Any = model_dic.get('''thre_bp2''' )
SCREAMING_SNAKE_CASE__ : List[Any] = model_dic.get('''thre_bp3''' )
return conv_ins
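# logistic sigmoid activation, shared by the convolution feature maps and both BP layers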
def lowercase__ ( self : str , _lowercase : Optional[int] ):
return 1 / (1 + np.exp(-1 * x ))
def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] ):
return round(_lowercase , 3 )
def lowercase__ ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ):
# convolution process
SCREAMING_SNAKE_CASE__ : Tuple = convs[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = convs[1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.shape(_lowercase )[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_lowercase )
# calculate the feature map of every single kernel, and save it as a list of matrices
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1 )
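# e.g. a 28x28 input with a 5x5 kernel and stride 1 gives (28 - 5) / 1 + 1 = 24, i.e. 24x24
# feature maps (assuming the stride divides the span evenly)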
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(_lowercase ).reshape(
_lowercase , _lowercase )
data_featuremap.append(_lowercase )
# expanding the data slice to one dimension
SCREAMING_SNAKE_CASE__ : int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asarray(_lowercase )
return focus_list, data_featuremap
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any]="average_pool" ):
# pooling process
SCREAMING_SNAKE_CASE__ : List[str] = len(featuremaps[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = int(size_map / size_pooling )
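# e.g. a 24x24 feature map pooled with a 2x2 window shrinks to a 12x12 map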
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_map in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Any = featuremaps[i_map]
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(0 , _lowercase , _lowercase ):
for j_focus in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asmatrix(_lowercase ).reshape(_lowercase , _lowercase )
featuremap_pooled.append(_lowercase )
return featuremap_pooled
def lowercase__ ( self : Optional[Any] , _lowercase : Optional[Any] ):
# expanding three-dimensional data into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Dict = []
for i in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.shape(data[i] )
SCREAMING_SNAKE_CASE__ : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE__ : Dict = data_listed.getA().tolist()[0]
data_expanded.extend(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(_lowercase )
return data_expanded
def lowercase__ ( self : Tuple , _lowercase : Optional[int] ):
# expanding a matrix into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.asarray(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : str = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowercase__ ( self : List[str] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Dict = 0
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : Any = np.ones((size_map, size_map) )
for i in range(0 , _lowercase , _lowercase ):
for j in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE__ : Dict = i_pool + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.multiply(
_lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowercase )
return pd_all
def lowercase__ ( self : List[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Tuple , _lowercase : int=bool ):
# model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_lowercase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_lowercase )) )
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(_lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE__ : Any = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE__ : str = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : int = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Any = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sig(_lowercase )
# --------------Model Learning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE__ : Tuple = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.multiply(
np.dot(_lowercase , self.wkj ) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(_lowercase , self.vji )
SCREAMING_SNAKE_CASE__ : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE__ : List[str] = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._calculate_gradient_from_pool(
_lowercase , _lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE__ : Dict = self.rate_weight * np.dot(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
SCREAMING_SNAKE_CASE__ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the summed error over every single image
SCREAMING_SNAKE_CASE__ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = rp + 1
SCREAMING_SNAKE_CASE__ : List[str] = error_count / patterns
all_mse.append(_lowercase )
def draw_error():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowercase , '''+-''' )
plt.plot(_lowercase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_lowercase , alpha=0.5 )
plt.show()
print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def lowercase__ ( self : Union[str, Any] , _lowercase : int ):
# model prediction
SCREAMING_SNAKE_CASE__ : Dict = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_lowercase )) )
for p in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Any = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Tuple = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.sig(_lowercase )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE__ : str = [list(map(self.do_round , _lowercase ) ) for each in produce_out]
return np.asarray(_lowercase )
def lowercase__ ( self : Optional[int] , _lowercase : Tuple ):
# return the image data after the convolution process so we can inspect it
SCREAMING_SNAKE_CASE__ : str = np.asmatrix(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Dict = self.pooling(_lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 35 | 1 |
from math import isclose, sqrt
def a ( A__ , A__ , A__ ) -> tuple[float, float, float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = point_y / 4 / point_x
SCREAMING_SNAKE_CASE__ : Dict = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
SCREAMING_SNAKE_CASE__ : List[str] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
SCREAMING_SNAKE_CASE__ : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
SCREAMING_SNAKE_CASE__ : Optional[Any] = outgoing_gradient**2 + 4
SCREAMING_SNAKE_CASE__ : Tuple = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
SCREAMING_SNAKE_CASE__ : Dict = (point_y - outgoing_gradient * point_x) ** 2 - 1_0_0
SCREAMING_SNAKE_CASE__ : Optional[int] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
SCREAMING_SNAKE_CASE__ : List[str] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
SCREAMING_SNAKE_CASE__ : List[Any] = x_minus if isclose(A__ , A__ ) else x_plus
SCREAMING_SNAKE_CASE__ : Union[str, Any] = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def a ( A__ = 1.4 , A__ = -9.6 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : float = first_x_coord
SCREAMING_SNAKE_CASE__ : float = first_y_coord
SCREAMING_SNAKE_CASE__ : float = (1_0.1 - point_y) / (0.0 - point_x)
while not (-0.0_1 <= point_x <= 0.0_1 and point_y > 0):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = next_point(A__ , A__ , A__ )
num_reflections += 1
return num_reflections
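# the beam starts at (1.4, -9.6), reflects inside the ellipse 4x^2 + y^2 = 100, and the loop
# stops once it exits through the small gap at the top (|x| <= 0.01, y > 0); the reflection
# count is the answer (this matches Project Euler problem 144)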
if __name__ == "__main__":
print(F'''{solution() = }''')
| 35 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase :
def __init__( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=99 , _lowercase : Optional[int]=13 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : str=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , _lowercase : Any=32 , _lowercase : int=4 , _lowercase : Dict=4 , _lowercase : Dict=30 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : str = pad_token_id
SCREAMING_SNAKE_CASE__ : str = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : int = decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase__ ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
SCREAMING_SNAKE_CASE__ : int = outputs['''past_key_values''']
# create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append the next tokens to input_ids
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , past_key_values=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
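# with a correct key/value cache, feeding only the new token plus past_key_values must
# reproduce the same hidden states as re-running the full concatenated sequence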
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Tuple = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase : Any = True
lowerCamelCase : int = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : Tuple ):
pass
| 35 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 35 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = LayoutLMTokenizer
lowerCamelCase : Any = LayoutLMTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : List[Any] = True
def lowercase__ ( self : Optional[int] ):
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
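# token ids follow list order ([UNK]=0 ... lowest=12), which is what the id assertion below
# relies on (e.g. "un" -> 7, "," -> 10)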
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Optional[int] , **_lowercase : str ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : str = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ : Any = '''unwanted, running'''
return input_text, output_text
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_lowercase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )
def lowercase__ ( self : str ):
pass
| 35 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ :Optional[Any] = logging.get_logger(__name__)
a_ :str = torch.device('cpu')
def a ( ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE__ : Dict = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
def a ( A__ ) -> List[str]:
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
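# reference values: the first five logits produced by each original checkpoint, compared
# against the converted model's output at the end of the conversion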
def a ( A__ , A__ , A__ ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = dct.pop(A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = val
def a ( A__ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = []
for k in state_dict.keys():
SCREAMING_SNAKE_CASE__ : str = k
if ".pwconv" in k:
SCREAMING_SNAKE_CASE__ : str = k_new.replace('''.pwconv''' , '''.point_wise_conv''' )
if ".dwconv" in k:
SCREAMING_SNAKE_CASE__ : Optional[int] = k_new.replace('''.dwconv''' , '''.depth_wise_conv''' )
if ".Proj." in k:
SCREAMING_SNAKE_CASE__ : Tuple = k_new.replace('''.Proj.''' , '''.proj.''' )
if "patch_embed" in k_new:
SCREAMING_SNAKE_CASE__ : int = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''' )
if "network" in k_new:
SCREAMING_SNAKE_CASE__ : Dict = k_new.split('''.''' )
if ls[2].isdigit():
SCREAMING_SNAKE_CASE__ : Optional[int] = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:] )
else:
SCREAMING_SNAKE_CASE__ : str = k_new.replace('''network''' , '''swiftformer.encoder.network''' )
rename_keys.append((k, k_new) )
return rename_keys
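# e.g. an original key like "network.0.1.pwconv1.weight" would be renamed to
# "swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight" by the rules above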
@torch.no_grad()
def a ( A__ , A__ , A__ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = SwiftFormerConfig()
# ImageNet-1k classification head: 1000 labels loaded from the huggingface/label-files repo
SCREAMING_SNAKE_CASE__ : Tuple = 1_0_0_0
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE__ : Optional[int] = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE__ : Any = json.load(open(hf_hub_download(A__ , A__ , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] = {int(A__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[str] = idalabel
SCREAMING_SNAKE_CASE__ : str = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [3, 3, 6, 4]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
SCREAMING_SNAKE_CASE__ : Tuple = [3, 3, 9, 6]
SCREAMING_SNAKE_CASE__ : List[str] = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
SCREAMING_SNAKE_CASE__ : Any = [4, 3, 1_0, 5]
SCREAMING_SNAKE_CASE__ : int = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
SCREAMING_SNAKE_CASE__ : Optional[Any] = [4, 4, 1_2, 6]
SCREAMING_SNAKE_CASE__ : Dict = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('''https''' ):
SCREAMING_SNAKE_CASE__ : List[Any] = torch.hub.load_state_dict_from_url(A__ , map_location='''cpu''' , check_hash=A__ )
else:
SCREAMING_SNAKE_CASE__ : Tuple = torch.load(A__ , map_location='''cpu''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = checkpoint
SCREAMING_SNAKE_CASE__ : Dict = create_rename_keys(A__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(A__ , A__ , A__ )
# load HuggingFace model
SCREAMING_SNAKE_CASE__ : str = SwiftFormerForImageClassification(A__ ).eval()
hf_model.load_state_dict(A__ )
# prepare test inputs
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[Any] = ViTImageProcessor.from_pretrained('''preprocessor_config''' )
SCREAMING_SNAKE_CASE__ : str = processor(images=A__ , return_tensors='''pt''' )
# compare outputs from both models
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_expected_output(A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = hf_model(inputs['''pixel_values'''] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , A__ , atol=1e-3 )
Path(A__ ).mkdir(exist_ok=A__ )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(A__ )
if __name__ == "__main__":
a_ :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
a_ :int = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 35 |
from __future__ import annotations
def a ( A__ , A__ , A__ ) -> dict[str, float]:
'''simple docstring'''
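# Ohm's law (V = I * R): exactly one of the three arguments must be 0, and that quantity is solved from the other two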
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
def __init__( self : int , _lowercase : Any , _lowercase : Union[str, Any]=13 , _lowercase : int=7 , _lowercase : Union[str, Any]=True , _lowercase : Any=True , _lowercase : Optional[Any]=False , _lowercase : Any=True , _lowercase : Optional[int]=99 , _lowercase : Dict=32 , _lowercase : Optional[Any]=5 , _lowercase : List[Any]=4 , _lowercase : List[str]=37 , _lowercase : List[Any]="gelu" , _lowercase : Optional[Any]=0.1 , _lowercase : Optional[Any]=0.1 , _lowercase : Dict=5_12 , _lowercase : Optional[Any]=16 , _lowercase : str=2 , _lowercase : List[str]=0.02 , _lowercase : Optional[int]=3 , _lowercase : str=4 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
SCREAMING_SNAKE_CASE__ : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = seq_length
SCREAMING_SNAKE_CASE__ : Any = is_training
SCREAMING_SNAKE_CASE__ : Any = use_input_mask
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : str = intermediate_size
SCREAMING_SNAKE_CASE__ : str = hidden_act
SCREAMING_SNAKE_CASE__ : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : str = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = num_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE__ : Optional[int] = scope
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Tuple ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
def lowercase__ ( self : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : List[Any] , _lowercase : Dict , _lowercase : str , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Tuple = BioGptModel(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(_lowercase , attention_mask=_lowercase )
SCREAMING_SNAKE_CASE__ : str = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , _lowercase : int , _lowercase : str , _lowercase : Union[str, Any] , _lowercase : Dict , _lowercase : int , _lowercase : List[str] , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Tuple = BioGptForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : str , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : int , *_lowercase : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel(config=_lowercase )
model.to(_lowercase )
model.eval()
# create attention mask
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self.seq_length // 2
SCREAMING_SNAKE_CASE__ : List[Any] = 0
# first forward pass
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase , attention_mask=_lowercase ).to_tuple()
# create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor((1,) , _lowercase ).item() + 1
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = random_other_next_tokens
# append to next input_ids and attn_mask
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_lowercase )] , dim=1 , )
# get two different outputs
SCREAMING_SNAKE_CASE__ : int = model(_lowercase , attention_mask=_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : Any = model(_lowercase , past_key_values=_lowercase , attention_mask=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-3 ) )
def lowercase__ ( self : Optional[Any] , _lowercase : str , _lowercase : List[Any] , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : Union[str, Any] , *_lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[str] = BioGptModel(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : str = torch.ones(input_ids.shape , dtype=torch.long , device=_lowercase )
# first forward pass
SCREAMING_SNAKE_CASE__ : int = model(_lowercase , attention_mask=_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , attention_mask=_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , attention_mask=_lowercase , past_key_values=_lowercase )[
'''last_hidden_state'''
]
# select random slice
SCREAMING_SNAKE_CASE__ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-3 ) )
def lowercase__ ( self : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : Tuple , *_lowercase : Optional[Any] , _lowercase : Tuple=False ):
SCREAMING_SNAKE_CASE__ : Dict = BioGptForCausalLM(_lowercase )
model.to(_lowercase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
SCREAMING_SNAKE_CASE__ : Dict = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def lowercase__ ( self : int , _lowercase : Optional[Any] , *_lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = BioGptModel(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def lowercase__ ( self : int , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Dict , _lowercase : Optional[int] , *_lowercase : Any ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = BioGptForTokenClassification(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) : str = config_and_inputs
SCREAMING_SNAKE_CASE__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Dict = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowerCamelCase : Optional[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
lowerCamelCase : List[Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase : List[Any] = False
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModelTester(self )
SCREAMING_SNAKE_CASE__ : List[str] = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def lowercase__ ( self : List[str] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE__ : int = type
self.model_tester.create_and_check_model(*_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_lowercase )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_lowercase , gradient_checkpointing=_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_lowercase )
@slow
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
SCREAMING_SNAKE_CASE__ : Any = '''left'''
# Define PAD Token = EOS Token
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE__ : Optional[int] = model.config.eos_token_id
# use different length sentences to test batching
SCREAMING_SNAKE_CASE__ : List[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer(_lowercase , return_tensors='''pt''' , padding=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = inputs['''input_ids'''].to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.generate(
input_ids=_lowercase , attention_mask=inputs['''attention_mask'''].to(_lowercase ) , )
SCREAMING_SNAKE_CASE__ : int = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.generate(input_ids=_lowercase )
SCREAMING_SNAKE_CASE__ : str = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = model.generate(input_ids=_lowercase , max_length=model.config.max_length - num_paddings )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
SCREAMING_SNAKE_CASE__ : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=_lowercase )
SCREAMING_SNAKE_CASE__ : int = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(_lowercase , [non_padded_sentence, padded_sentence] )
@slow
def lowercase__ ( self : int ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Dict = 3
SCREAMING_SNAKE_CASE__ : List[Any] = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ : List[str] = input_ids.ne(1 ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Any = BioGptForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[Any] = 3
SCREAMING_SNAKE_CASE__ : Dict = '''multi_label_classification'''
SCREAMING_SNAKE_CASE__ : Dict = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ : List[str] = input_ids.ne(1 ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE__ : Optional[int] = BioGptForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
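# regression test: compare a slice of the LM logits against reference values from the original BioGPT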
SCREAMING_SNAKE_CASE__ : List[Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
SCREAMING_SNAKE_CASE__ : str = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = 4_23_84
SCREAMING_SNAKE_CASE__ : Tuple = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _lowercase )
SCREAMING_SNAKE_CASE__ : str = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowercase , atol=1E-4 ) )
@slow
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
SCREAMING_SNAKE_CASE__ : List[str] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_lowercase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = model.generate(
**_lowercase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=_lowercase , )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=_lowercase )
SCREAMING_SNAKE_CASE__ : str = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(_lowercase , _lowercase )
| 35 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
a_ :List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
a_ :Tuple = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
a_ :str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ :List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a_ :Optional[int] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ :Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
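# when titles/texts are given, each passage is encoded as "[CLS] question [SEP] title [SEP] text" (one row per passage)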
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
f"""There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
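# rank passages by their relevance logits, then extract the best answer spans from each passage in turn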
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
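# score every candidate (start, end) span up to max_answer_length and keep the top non-overlapping ones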
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda x : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
| 35 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def a ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Union[str, Any]:
'''simple docstring'''
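# map each compression format onto its input fixture and the extractor class expected to handle it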
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = input_paths_and_base_extractors[compression_format]
if input_path is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(A__ )
assert base_extractor.is_extractable(A__ )
SCREAMING_SNAKE_CASE__ : Tuple = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(A__ , A__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
SCREAMING_SNAKE_CASE__ : List[str] = file_path.read_text(encoding='''utf-8''' )
else:
SCREAMING_SNAKE_CASE__ : Tuple = output_path.read_text(encoding='''utf-8''' )
SCREAMING_SNAKE_CASE__ : List[str] = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'''compression_format, is_archive''' , [
('''7z''', True),
('''bz2''', False),
('''gzip''', False),
('''lz4''', False),
('''tar''', True),
('''xz''', False),
('''zip''', True),
('''zstd''', False),
] , )
def a ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
SCREAMING_SNAKE_CASE__ : int = input_paths[compression_format]
if input_path is None:
SCREAMING_SNAKE_CASE__ : int = f"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(A__ )
SCREAMING_SNAKE_CASE__ : Any = Extractor.infer_extractor_format(A__ )
assert extractor_format is not None
SCREAMING_SNAKE_CASE__ : Any = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(A__ , A__ , A__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
SCREAMING_SNAKE_CASE__ : List[Any] = file_path.read_text(encoding='''utf-8''' )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = output_path.read_text(encoding='''utf-8''' )
SCREAMING_SNAKE_CASE__ : List[Any] = text_file.read_text(encoding='''utf-8''' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def a ( A__ , A__ ) -> str:
'''simple docstring'''
import tarfile
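# craft a tar archive whose member path uses ".." to escape the extraction directory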
SCREAMING_SNAKE_CASE__ : Optional[int] = tmp_path / '''data_dot_dot'''
directory.mkdir()
SCREAMING_SNAKE_CASE__ : str = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(A__ , '''w''' ) as f:
f.add(A__ , arcname=os.path.join('''..''' , text_file.name ) )
return path
@pytest.fixture
def a ( A__ ) -> Union[str, Any]:
'''simple docstring'''
import tarfile
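# craft a tar archive containing a symlink that points outside the extraction directory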
SCREAMING_SNAKE_CASE__ : Optional[int] = tmp_path / '''data_sym_link'''
directory.mkdir()
SCREAMING_SNAKE_CASE__ : Tuple = directory / '''tar_file_with_sym_link.tar'''
os.symlink('''..''' , directory / '''subdir''' , target_is_directory=A__ )
with tarfile.TarFile(A__ , '''w''' ) as f:
f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , )
def a ( A__ , A__ , A__ , A__ , A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
SCREAMING_SNAKE_CASE__ : Tuple = insecure_tar_files[insecure_tar_file]
SCREAMING_SNAKE_CASE__ : List[Any] = tmp_path / '''extracted'''
TarExtractor.extract(A__ , A__ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def a ( A__ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
SCREAMING_SNAKE_CASE__ : Tuple = (
b'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
b'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
b'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
b'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open('''wb''' ) as f:
f.write(A__ )
assert zipfile.is_zipfile(str(A__ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(A__ ) # but we're right
| 35 |
import random
def a ( A__ ) -> bool:
'''simple docstring'''
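# Miller-Rabin probabilistic primality test: write num - 1 as 2**t * s with s odd, then check 5 random bases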
SCREAMING_SNAKE_CASE__ : str = num - 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = s // 2
t += 1
for _ in range(5 ):
SCREAMING_SNAKE_CASE__ : int = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pow(A__ , A__ , A__ )
if v != 1:
SCREAMING_SNAKE_CASE__ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE__ : Any = i + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (v**2) % num
return True
def a ( A__ ) -> bool:
'''simple docstring'''
if num < 2:
return False
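# quick screen against a table of small primes before falling back to the Miller-Rabin test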
SCREAMING_SNAKE_CASE__ : Optional[int] = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(A__ )
def a ( A__ = 1_0_2_4 ) -> int:
'''simple docstring'''
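# keep drawing random keysize-bit candidates until one passes the primality check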
while True:
SCREAMING_SNAKE_CASE__ : Any = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(A__ ):
return num
if __name__ == "__main__":
a_ :Dict = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
| 35 | 1 |
def a ( A__ , A__ ) -> str:
'''simple docstring'''
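# bitwise AND of two non-negative integers, returned as a zero-padded binary string with a "0b" prefix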
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
SCREAMING_SNAKE_CASE__ : Tuple = str(bin(A__ ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE__ : str = str(bin(A__ ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE__ : List[str] = max(len(A__ ) , len(A__ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(A__ ) , b_binary.zfill(A__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a ( A__ ) -> List[Any]:
'''simple docstring'''
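# logistic sigmoid, squashing any real value into the interval (0, 1)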
return 1 / (1 + np.exp(-A__ ))
def a ( A__ , A__ ) -> Any:
'''simple docstring'''
return (-y * np.log(A__ ) - (1 - y) * np.log(1 - h )).mean()
def a ( A__ , A__ , A__ ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = np.dot(A__ , A__ )
return np.sum(y * scores - np.log(1 + np.exp(A__ ) ) )
def a ( A__ , A__ , A__ , A__=7_0_0_0_0 ) -> Tuple:
'''simple docstring'''
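# plain batch gradient descent on the logistic loss; the first argument is the learning rate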
SCREAMING_SNAKE_CASE__ : int = np.zeros(x.shape[1] )
for iterations in range(A__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : Dict = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : int = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : int = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = cost_function(A__ , A__ )
if iterations % 1_0_0 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
a_ :str = datasets.load_iris()
a_ :Dict = iris.data[:, :2]
a_ :int = (iris.target != 0) * 1
a_ :Dict = 0.1
a_ :str = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing the theta i.e our weights vector
def a ( A__ ) -> int:
'''simple docstring'''
return sigmoid_function(
np.dot(A__ , A__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((a_) , (a_)) :str = (x[:, 0].min(), x[:, 0].max())
((a_) , (a_)) :Tuple = (x[:, 1].min(), x[:, 1].max())
((a_) , (a_)) :Dict = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a_ :Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
a_ :Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
| 35 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
a_ :Tuple = None
a_ :Optional[Any] = logging.get_logger(__name__)
a_ :int = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
a_ :List[Any] = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
a_ :Tuple = {
'camembert-base': 5_12,
}
a_ :Dict = '▁'
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Tuple = VOCAB_FILES_NAMES
lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Any = ['''input_ids''', '''attention_mask''']
lowerCamelCase : Tuple = CamembertTokenizer
def __init__( self : int , _lowercase : int=None , _lowercase : List[str]=None , _lowercase : Optional[int]="<s>" , _lowercase : Optional[int]="</s>" , _lowercase : Tuple="</s>" , _lowercase : str="<s>" , _lowercase : Tuple="<unk>" , _lowercase : str="<pad>" , _lowercase : Dict="<mask>" , _lowercase : List[str]=["<s>NOTUSED", "</s>NOTUSED"] , **_lowercase : List[str] , ):
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
_lowercase , tokenizer_file=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_file
SCREAMING_SNAKE_CASE__ : Optional[Any] = False if not self.vocab_file else True
def lowercase__ ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
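# a single CamemBERT sequence has the form <s> X </s>; a pair has the form <s> A </s></s> B </s>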
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : Any , _lowercase : str , _lowercase : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
| 35 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a ( A__ ) -> Tuple:
'''simple docstring'''
return EnvironmentCommand()
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
def lowercase__ ( self : List[Any] ):
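# collect the installed versions of diffusers' main dependencies so they can be pasted into bug reports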
SCREAMING_SNAKE_CASE__ : Tuple = huggingface_hub.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''NA'''
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ : str = '''not installed'''
if is_transformers_available():
import transformers
SCREAMING_SNAKE_CASE__ : Optional[Any] = transformers.__version__
SCREAMING_SNAKE_CASE__ : Any = '''not installed'''
if is_accelerate_available():
import accelerate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerate.__version__
SCREAMING_SNAKE_CASE__ : Tuple = '''not installed'''
if is_xformers_available():
import xformers
SCREAMING_SNAKE_CASE__ : Tuple = xformers.__version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def lowercase__ ( _lowercase : Dict ):
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 | 1 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
a_ :Tuple = getLogger(__name__)
a_ :Optional[Any] = 'cuda' if torch.cuda.is_available() else 'cpu'
def a ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ) -> Dict:
'''simple docstring'''
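# run the examples through the model in chunks of batch_size and write one generated line per input example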
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(A__ ).open('''w''' , encoding='''utf-8''' )
SCREAMING_SNAKE_CASE__ : List[Any] = str(A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ )
if fpaa:
SCREAMING_SNAKE_CASE__ : Any = model.half()
SCREAMING_SNAKE_CASE__ : List[str] = AutoTokenizer.from_pretrained(A__ )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
SCREAMING_SNAKE_CASE__ : Optional[int] = time.time()
# update config with task specific params
use_task_specific_params(A__ , A__ )
if prefix is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [prefix + text for text in examples_chunk]
SCREAMING_SNAKE_CASE__ : Any = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ )
SCREAMING_SNAKE_CASE__ : str = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
SCREAMING_SNAKE_CASE__ : List[Any] = int(time.time() - start_time ) # seconds
SCREAMING_SNAKE_CASE__ : Any = len(A__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def a ( ) -> Tuple:
'''simple docstring'''
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    # T5-style checkpoints expect a leading space in front of each source line
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
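    # Usage for summarization (paths hypothetical; flags follow the parser defined above):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --bs 8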
run_generate(verbose=True)
| 35 |
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the json config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
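    # Example invocation (paths hypothetical):
    # python convert_rembert_tf_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path ./rembert/model.ckpt \
    #     --rembert_config_file ./rembert/config.json \
    #     --pytorch_dump_path ./rembert/pytorch_model.bin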
| 35 | 1 |