Dataset columns (rebuilt from the flattened viewer header):

| column | type | values |
|---|---|---|
| code | string | lengths 82 to 54.1k |
| code_codestyle | int64 | 0 to 699 |
| style_context | string | lengths 111 to 35.6k |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 to 1 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Text model output that also contains a projection of the last hidden states."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        return_dict=None,
        output_hidden_states=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            # project the second-to-last hidden state after a LayerNorm
            sequence_output = self.pre_LN(outputs["hidden_states"][-2])
            projection_state = self.transformation_pre(sequence_output)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)

        return TransformationModelOutput(
            projection_state=projection_state,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
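A minimal smoke test of the classes above; the tiny config values are arbitrary placeholders, not a released checkpoint:

```python
config = RobertaSeriesConfig(
    vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2,
    intermediate_size=37, project_dim=16,
)
model = RobertaSeriesModelWithTransformation(config)
input_ids = torch.tensor([[5, 6, 7, 2]])
out = model(input_ids=input_ids, attention_mask=torch.ones_like(input_ids))
print(out.projection_state.shape)  # torch.Size([1, 4, 16])
```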
| code_codestyle: 101 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : UNetaDModel
UpperCAmelCase : KarrasVeScheduler
def __init__( self : Any , _UpperCAmelCase : UNetaDModel , _UpperCAmelCase : KarrasVeScheduler ):
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self : Optional[int] , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , **_UpperCAmelCase : Optional[Any] , ):
_A = self.unet.config.sample_size
_A = (batch_size, 3, img_size, img_size)
_A = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_A = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_A = self.scheduler.schedule[t]
_A = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_A , _A = self.scheduler.add_noise_to_input(_UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_A = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_A = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_A = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
_A = self.scheduler.step_correct(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , step_output.prev_sample , step_output['derivative'] , )
_A = step_output.prev_sample
_A = (sample / 2 + 0.5).clamp(0 , 1 )
_A = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_A = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
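A hedged usage sketch with untrained, randomly initialized components, just to exercise the sampling loop; the small UNet shape arguments are placeholders, and `KarrasVeScheduler` lives under diffusers' deprecated modules in recent releases:

```python
from diffusers import KarrasVeScheduler, UNet2DModel

unet = UNet2DModel(
    sample_size=32, in_channels=3, out_channels=3, layers_per_block=1,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
images = pipe(batch_size=1, num_inference_steps=2).images  # noise-like output; weights are untrained
```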
| style_context_codestyle: 7 | label: 0 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Dict = sorted(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , key=lambda SCREAMING_SNAKE_CASE : x[0] / x[1] , reverse=SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase : int = [i[0] for i in r], [i[1] for i in r]
UpperCamelCase : Optional[Any] = list(accumulate(SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Optional[Any] = bisect(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| code_codestyle: 102 |
"""simple docstring"""
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = None
_A = None
_A = graph
self._normalize_graph(_UpperCAmelCase , _UpperCAmelCase )
_A = len(_UpperCAmelCase )
_A = None
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ):
if sources is int:
_A = [sources]
if sinks is int:
_A = [sinks]
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_UpperCAmelCase ) > 1 or len(_UpperCAmelCase ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] ):
_A = algorithm(self )
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Union[str, Any] ):
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A = flow_network.graph
_A = False
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
self._algorithm()
_A = True
def lowerCAmelCase_ ( self : int ):
pass
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , _UpperCAmelCase : Any ):
super().__init__(_UpperCAmelCase )
# use this to save your result
_A = -1
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : List[Any] ):
super().__init__(_UpperCAmelCase )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def lowerCAmelCase_ ( self : Dict ):
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(_UpperCAmelCase ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(_UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_UpperCAmelCase ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Any ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_UpperCAmelCase , _UpperCAmelCase )
self.relabel(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple ):
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int ):
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a = [0]
a = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
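For the demo network, the only augmenting path is 0 → 1 → 2 → 3, so the printed flow should equal the bottleneck capacity min(7, 6, 8) = 6; a standalone check, assuming the classes above:

```python
network = FlowNetwork([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], [0], [3])
network.set_maximum_flow_algorithm(PushRelabelExecutor)
assert network.find_maximum_flow() == 6  # bottleneck of the single path 0 -> 1 -> 2 -> 3
```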
| style_context_codestyle: 7 | label: 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def snake_case ( lowerCAmelCase_ ) -> List[Any]:
_snake_case = filter(lambda lowerCAmelCase_ : p.requires_grad , model.parameters() )
_snake_case = sum([np.prod(p.size() ) for p in model_parameters] )
return params
snake_case = logging.getLogger(__name__)
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
if metric == "rouge2":
_snake_case = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
_snake_case = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
_snake_case = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
_snake_case = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
''' function.''' )
_snake_case = ModelCheckpoint(
dirpath=lowerCAmelCase_ , filename=lowerCAmelCase_ , monitor=f"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]:
return EarlyStopping(
monitor=f"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=lowerCAmelCase_ , verbose=lowerCAmelCase_ , )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| code_codestyle: 103 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = SpeechTaTokenizer
UpperCAmelCase : Tuple = False
UpperCAmelCase : Optional[int] = True
def lowerCAmelCase_ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
_A = SpeechTaTokenizer(_UpperCAmelCase )
_A = AddedToken('<mask>' , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )
_A = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple ):
_A = 'this is a test'
_A = 'this is a test'
return input_text, output_text
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict=20 , _UpperCAmelCase : str=5 ):
_A , _A = self.get_input_output_texts(_UpperCAmelCase )
_A = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
_A = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'],
        )
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        # fmt: off
        expected_encoding = {
            'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| style_context_codestyle: 7 | label: 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : Optional[int] = "roformer"
def __init__( self , SCREAMING_SNAKE_CASE__=50000 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=1536 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=1e-12 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ) -> Any:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A__ = vocab_size
A__ = hidden_size if embedding_size is None else embedding_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = rotary_value
A__ = use_cache
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
@property
def snake_case__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
A__ = {0: "batch", 1: "sequence"}
A__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
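A small sketch of both classes together; the tiny dimensions are placeholders:

```python
config = RoFormerConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128)
onnx_config = RoFormerOnnxConfig(config)
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', ...), ('token_type_ids', ...)])
```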
| code_codestyle: 104 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| style_context_codestyle: 7 | label: 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 105 |
"""simple docstring"""
import argparse
a = '''docs/source/_static/js/custom.js'''
def _snake_case ( _snake_case : Dict ) -> Any:
'''simple docstring'''
with open(_snake_case , encoding='utf-8' , newline='\n' ) as f:
_A = f.readlines()
_A = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
_A = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(_snake_case , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_snake_case )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
a = parser.parse_args()
update_custom_js(args.version)
| style_context_codestyle: 7 | label: 0 |
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)

| code_codestyle: 106 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : int = '''vit_mae'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Optional[int]=3_072 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : List[Any]=1E-1_2 , _UpperCAmelCase : Optional[Any]=224 , _UpperCAmelCase : int=16 , _UpperCAmelCase : str=3 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : int=16 , _UpperCAmelCase : str=512 , _UpperCAmelCase : int=8 , _UpperCAmelCase : List[Any]=2_048 , _UpperCAmelCase : Optional[Any]=0.75 , _UpperCAmelCase : List[str]=False , **_UpperCAmelCase : Union[str, Any] , ):
super().__init__(**_UpperCAmelCase )
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = initializer_range
_A = layer_norm_eps
_A = image_size
_A = patch_size
_A = num_channels
_A = qkv_bias
_A = decoder_num_attention_heads
_A = decoder_hidden_size
_A = decoder_num_hidden_layers
_A = decoder_intermediate_size
_A = mask_ratio
_A = norm_pix_loss
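A quick sketch, assuming the class is the one exported from transformers as `ViTMAEConfig`:

```python
from transformers import ViTMAEConfig

config = ViTMAEConfig()   # defaults mirror facebook/vit-mae-base
print(config.mask_ratio)  # 0.75 -> three quarters of the patches are masked during pretraining
```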
| style_context_codestyle: 7 | label: 0 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """
    Return the Catalan number sequence from 0 through `upper_limit`.

    >>> catalan_numbers(5)
    [1, 1, 2, 5, 14, 42]
    >>> catalan_numbers(2)
    [1, 1, 2]
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
| code_codestyle: 107 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
a = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def _snake_case ( _snake_case : Optional[Any] ) -> str:
'''simple docstring'''
_A = torch.load(_snake_case , map_location='cpu' )
return sd
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Tuple=rename_keys_prefix ) -> List[str]:
'''simple docstring'''
_A = OrderedDict()
_A = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_A = key
for name_pair in rename_keys_prefix:
_A = new_key.replace(name_pair[0] , name_pair[1] )
_A = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_A = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def _snake_case ( _snake_case : List[str] , _snake_case : Dict ) -> Dict:
'''simple docstring'''
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_A = 'pretraining'
if "vcr" in checkpoint_path:
_A = {'visual_embedding_dim': 5_12}
elif "vqa_advanced" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48}
elif "vqa" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48}
elif "nlvr" in checkpoint_path:
_A = {'visual_embedding_dim': 10_24}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
_A = {'visual_embedding_dim': 5_12}
_A = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48}
_A = 'vqa_advanced'
elif "vqa" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48, 'num_labels': 31_29}
_A = 'vqa'
elif "nlvr" in checkpoint_path:
_A = {
'visual_embedding_dim': 10_24,
'num_labels': 2,
}
_A = 'nlvr'
_A = VisualBertConfig(**_snake_case )
# Load State Dict
_A = load_state_dict(_snake_case )
_A = get_new_dict(_snake_case , _snake_case )
if model_type == "pretraining":
_A = VisualBertForPreTraining(_snake_case )
elif model_type == "vqa":
_A = VisualBertForQuestionAnswering(_snake_case )
elif model_type == "nlvr":
_A = VisualBertForVisualReasoning(_snake_case )
elif model_type == "multichoice":
_A = VisualBertForMultipleChoice(_snake_case )
model.load_state_dict(_snake_case )
# Save Checkpoints
Path(_snake_case ).mkdir(exist_ok=_snake_case )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
a = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| style_context_codestyle: 7 | label: 0 |
def average_absolute_deviation(nums: list[int]) -> float:
    """
    Return the average absolute deviation of a list of numbers.

    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

| code_codestyle: 108 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _snake_case ( _snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
for param in module.parameters():
_A = False
def _snake_case ( ) -> Tuple:
'''simple docstring'''
_A = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_A = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def _snake_case ( _snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = plt.imshow(_snake_case )
fig.axes.get_xaxis().set_visible(_snake_case )
fig.axes.get_yaxis().set_visible(_snake_case )
plt.show()
def _snake_case ( ) -> Optional[Any]:
'''simple docstring'''
_A = datetime.now()
_A = current_time.strftime('%H:%M:%S' )
return timestamp
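A trivial composition of the helpers above:

```python
device = get_device()
print(f"[{get_timestamp()}] running on {device}")
```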
| style_context_codestyle: 7 | label: 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha**0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(t, predicted_variance=predicted_variance, prev_timestep=prev_timestep)

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor):
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
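For reference, steps 4 and 5 of `step` implement the DDPM posterior mean from eq. (7) of Ho et al. (2020), which the inline comments cite, and `_get_variance` computes the matching posterior variance; in that notation:

```latex
\tilde{\mu}_t(x_t, x_0)
  = \frac{\sqrt{\bar{\alpha}_{t-1}}\,\beta_t}{1-\bar{\alpha}_t}\,x_0
  + \frac{\sqrt{\alpha_t}\,(1-\bar{\alpha}_{t-1})}{1-\bar{\alpha}_t}\,x_t,
\qquad
\tilde{\beta}_t = \frac{1-\bar{\alpha}_{t-1}}{1-\bar{\alpha}_t}\,\beta_t
```

The first coefficient is `pred_original_sample_coeff`, the second is `current_sample_coeff`, and β̃_t is the `variance` value before the `fixed_small_log`/`learned_range` adjustments.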
| code_codestyle: 109 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = ['''image_processor''', '''tokenizer''']
UpperCAmelCase : Optional[int] = '''ViTImageProcessor'''
UpperCAmelCase : int = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Tuple , _UpperCAmelCase : int=None , _UpperCAmelCase : Tuple=None , **_UpperCAmelCase : Dict ):
_A = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
_A = kwargs.pop('feature_extractor' )
_A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : Optional[Any] , _UpperCAmelCase : int=None , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[int]=None , **_UpperCAmelCase : Union[str, Any] ):
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
_A = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None:
_A = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if images is not None:
_A = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None and images is not None:
_A = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_A = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_A = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def lowerCAmelCase_ ( self : str , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : Union[str, Any] ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def lowerCAmelCase_ ( self : Dict ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def lowerCAmelCase_ ( self : Tuple ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
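A hedged usage sketch; the checkpoint id and test image URL are the ones commonly used in the transformers docs, so treat them as assumptions rather than part of this file:

```python
import requests
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)
```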
| style_context_codestyle: 7 | label: 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
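# A tiny, self-contained sketch (not transformers' actual implementation) of the idea
# behind _LazyModule: defer each heavy submodule import until one of its exported
# names is first accessed on the package module.
import importlib
import types
class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported class name back to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }
    def __getattr__(self, attr):
        module = importlib.import_module('.' + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)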
| 110 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    '''simple docstring'''
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
        tense = '''will be''' if year > datetime.now().year else '''was'''
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 7 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 488 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self, vocab_size=50_257, n_positions=1_024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
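# Minimal usage sketch (assumes transformers is installed): instantiate the config
# directly, override a few fields, and read them back through the attribute_map
# aliases resolved by PretrainedConfig.
if __name__ == "__main__":
    config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
    print(config.num_hidden_layers, config.num_attention_heads, config.hidden_size)  # 2 4 128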
| 7 | 0 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """simple docstring"""
    if num <= 0:
        raise ValueError("""math domain error""")
    # integrate x^(num-1) * e^(-x) from 0 to infinity
    return quad(integrand, 0, inf, args=(num,))[0]
def integrand(x: float, z: float) -> float:
    """simple docstring"""
    return math.pow(x, z - 1) * math.exp(-x)
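# Sanity-check sketch: for positive integers, Γ(n) = (n - 1)!, so gamma(5) should be
# very close to 24. Guarded so importing the module stays side-effect free.
if __name__ == "__main__":
    print(gamma(5))  # expected ~24.0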
if __name__ == "__main__":
from doctest import testmod
testmod() | 483 |
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    '''simple docstring'''
    return " ".join(
        ''.join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 7 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n',
            '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low',
            'er', '\u0120lowest', '\u0120newer', '\u0120wider', '[UNK]',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '[UNK]'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('Hello', 'World')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['token_type_ids'], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/deberta-base')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        '''simple docstring'''
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('microsoft/deberta-base')
            sequences = [
                'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
                'ALBERT incorporates two parameter reduction techniques',
                'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
                ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
                ' vocabulary embedding.',
            ]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding['input_ids']]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded, decoded_sequences):
                self.assertEqual(expected, decoded)
| 94 |
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1_100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4E-0_7 ) < 1E-2
assert abs(result_mean.item() - 6.1_1_1_2E-1_0 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2E-0_7 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if str(torch_device).startswith('cpu'):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
| 7 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase : Optional[Any] = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""
    model_type = 'maskformer-swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
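# Minimal usage sketch (assumes transformers is installed): configure the backbone to
# expose specific stages; `out_features` are validated against `stage_names`.
if __name__ == "__main__":
    config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
    print(config.stage_names, config.out_features)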
| 641 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    '''simple docstring'''
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    '''simple docstring'''
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, 'schedule.bin')
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    '''simple docstring'''
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2E-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1E-2, eps=(1E-3_0, 1E-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1_000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    '''simple docstring'''
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1E-2, msg=F'''failed for {scheduler_func} in normal scheduler''', )
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=F'''failed for {scheduler_func} in save and reload''')
class LambdaScheduleWrapper:
    '''simple docstring'''
    def __init__(self, fn):
        self.fn = fn
    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)
    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 7 | 0 |
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('could not find root in given interval.')
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 365 |
"""simple docstring"""
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    '''simple docstring'''
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    '''simple docstring'''
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
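# Usage sketch: for an apparent power of 100 VA at power factor 0.9,
# P = S * pf and Q = S * sqrt(1 - pf^2).
if __name__ == "__main__":
    print(real_power(100, 0.9))      # 90.0 W
    print(reactive_power(100, 0.9))  # ~43.59 var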
| 7 | 0 |
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""")
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""")
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
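# Usage sketch: a 100-unit beam through a polarizer at 60 degrees transmits
# 100 * cos^2(60 deg) = 25 units (up to floating-point rounding).
if __name__ == "__main__":
    print(malus_law(100, 60))  # ~25.0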
| 102 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'xmod'
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-1_2, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    '''simple docstring'''
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
| 7 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    '''simple docstring'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, """vision""")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}
    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="""bicubic""", align_corners=False)
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("""uint8""")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["""predicted_depth"""] = predicted_depth
        output_dict["""depth"""] = depth
        return output_dict | 585 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    '''tensor(bool)''': np.bool_,
    '''tensor(int8)''': np.int8,
    '''tensor(uint8)''': np.uint8,
    '''tensor(int16)''': np.int16,
    '''tensor(uint16)''': np.uint16,
    '''tensor(int32)''': np.int32,
    '''tensor(uint32)''': np.uint32,
    '''tensor(int64)''': np.int64,
    '''tensor(uint64)''': np.uint64,
    '''tensor(float16)''': np.float16,
    '''tensor(float)''': np.float32,
    '''tensor(double)''': np.float64,
}
class OnnxRuntimeModel:
    '''simple docstring'''
    def __init__(self, model=None, **kwargs):
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.')
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir', None)
        self.latest_model_name = kwargs.get('latest_model_name', ONNX_WEIGHTS_NAME)
    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider')
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''')
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str, None]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs, ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs['model_save_dir'] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs['model_save_dir'] = Path(model_cache_path).parent
            kwargs['latest_model_name'] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs, ):
        revision = None
        if len(str(model_id).split('@')) == 2:
            model_id, revision = model_id.split('@')
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_squeezebert_fast'''] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_squeezebert'''] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 209 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'speech_to_text'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(self, vocab_size=10_000, encoder_layers=12, encoder_ffn_dim=2_048, encoder_attention_heads=4, decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=4, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6_000, max_target_positions=1_024, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1_024, input_feat_per_channel=80, input_channels=1, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
                F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
                F'''`config.num_conv_layers = {self.num_conv_layers}`.''')
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 7 | 0 |
import unittest
import numpy as np
def schur_complement(mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray, pseudo_inv: np.ndarray | None = None, ) -> np.ndarray:
    '''simple docstring'''
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"""Instead found A of size {shape_a} and B of size {shape_b}"""
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"""Instead found B of size {shape_b} and C of size {shape_c}"""
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")
    return mat_c - mat_b.T @ a_inv @ mat_b
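# Worked example sketch: for the block matrix M = [[A, B], [B^T, C]],
# det(M) = det(A) * det(M/A), where M/A = C - B^T A^{-1} B is the Schur
# complement computed above. Small illustrative matrices, run directly:
if __name__ == "__main__":
    A = np.array([[4.0, 1.0], [1.0, 3.0]])
    B = np.array([[1.0], [2.0]])
    C = np.array([[5.0]])
    print(schur_complement(A, B, C))  # 1x1 matrix: C - B^T A^{-1} B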
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)
    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)
    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 461 |
"""simple docstring"""
from manim import *
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = Rectangle(height=0.5 , width=0.5 )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A = Rectangle(height=0.25 , width=0.25 )
_A = [mem.copy() for i in range(6 )]
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('CPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(4 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('GPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Model' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
_A = []
_A = []
for i, rect in enumerate(_UpperCAmelCase ):
_A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
_A = [meta_mem.copy() for i in range(6 )]
_A = [meta_mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Disk' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
_A = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
_A = Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
_A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_A = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
_A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_A = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_A = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_A = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_A = a_c
_A = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
_A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait()
| 7 | 0 |
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__(self, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs, ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"""<extra_id_{i}>""" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda token: bool("""extra_id""" in str(token)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
                    """ extra_ids tokens""")
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
                """ eos tokens being added.""")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text):
        tokens = [chr(i) for i in text.encode("""utf-8""")]
        return tokens
    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id
    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token
    def convert_tokens_to_string(self, tokens):
        bstring = b""""""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("""utf-8""")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("""utf-8""")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("""utf-8""")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("""utf-8""")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("""utf-8""", errors="""ignore""")
        return string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        return ()
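# Minimal round-trip sketch (assumes transformers is installed): ByT5 operates
# directly on UTF-8 bytes, so ordinary text never maps to <unk>.
if __name__ == "__main__":
    tok = ByT5Tokenizer()
    ids = tok("hello")["input_ids"]
    print(ids)  # byte values shifted by the 3 fixed special tokens, plus </s> (id 1)
    print(tok.decode(ids, skip_special_tokens=True))  # "hello"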
| 565 |
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    '''simple docstring'''
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
    '''simple docstring'''
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 7 | 0 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if isinstance(number_of_qubits, str):
        raise TypeError('number of qubits must be a integer.')
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.')
    if number_of_qubits > 1_0:
        raise ValueError('number of qubits too large to simulate(>10).')
    qr = QuantumRegister(number_of_qubits, 'qr')
    cr = ClassicalRegister(number_of_qubits, 'cr')
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator')
    job = execute(quantum_circuit, backend, shots=1_0_0_0_0)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \\n {quantum_fourier_transform(3)}'
)
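    # Added illustration (not part of the original script): applied to the
    # default all-zeros state, the QFT produces an equal superposition, so
    # each of the 2**3 = 8 bitstrings should show up in roughly 1/8 of the
    # 10000 shots (~1250 counts each).
    print(f"expected count per basis state: {10000 / 2**3:.0f}")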
| 589 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """
    Additional arguments for fine-tuning seq2seq models.
    """

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 7 | 0 |
def solution(n: int = 10) -> str:
    """
    Returns the last ``n`` digits of 28433 * 2**7830457 + 1, computed with
    modular arithmetic so the huge power is never materialized in full.
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{solution(10) = }")
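    # Added illustration (not part of the original solution): three-argument
    # pow() performs modular exponentiation by repeated squaring, so the
    # roughly 2.4-million-digit value 2**7830457 is never built. Small check:
    assert pow(2, 100, 1000) == (2**100) % 1000 == 376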
| 488 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string as Ascii85 bytes."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
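
# Round-trip sketch (added illustration, not part of the original module):
# decoding an encoding must return the input text unchanged.
def base85_roundtrip(text: str) -> bool:
    return base85_decode(base85_encode(text)) == text


assert base85_roundtrip("The quick brown fox")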
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 483 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the Karras VE stochastic sampler."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
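
# Usage sketch (added illustration; the checkpoint path below is hypothetical,
# any repo containing a compatible UNet2DModel + KarrasVeScheduler works):
#
#   pipe = KarrasVePipeline.from_pretrained("path/to/karras-ve-checkpoint")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("karras_ve_sample.png")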
| 7 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 94 |
"""simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method in a subclass
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
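    # Added cross-check (illustration only): on this 4-vertex cycle the single
    # source-to-sink path 0 -> 1 -> 2 -> 3 is capped by its smallest edge,
    # so the maximum flow must be min(7, 6, 8) = 6.
    assert maximum_flow == 6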
| 7 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """
    A bunch of sanity checks on the arguments before the heavy lifting starts.
    """
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            " See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)
# ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(f'Param: {args}' )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'Loading data from {args.data_file}' )
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f'Loading student config from {args.student_config}' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(f'cuda:{args.local_rank}' )
logger.info("""Student loaded.""" )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(f'cuda:{args.local_rank}' )
logger.info(f'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
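
# Example invocation (added illustration; the file paths below are
# placeholders, substitute your own binarized data and configs):
#
#   python train.py --force \
#     --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_cos 1.0 --alpha_clm 0.0 --freeze_pos_embs \
#     --dump_path serialization_dir/my_first_training \
#     --data_file data/binarized_text.bert-base-uncased.pickle \
#     --token_counts data/token_counts.bert-base-uncased.pickle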
| 641 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
            "input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 7 | 0 |
'''simple docstring'''
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string by placing each of them
    in a grid (whose height depends on the key) along a zigzag."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generates a template based on the key, fills it with the
    characters of the input string, then reads it back in a zigzag."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
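
    # Added round-trip illustration (not part of the original module): for any
    # 1 < key < len(text), decrypt() inverts encrypt() by construction.
    message = "Hello, World!"
    assert decrypt(encrypt(message, 4), 4) == message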
| 365 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 7 | 0 |
"""simple docstring"""
| 102 |
"""simple docstring"""
import argparse
JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the stable version and the version table in custom.js."""
    with open(JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
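
# For reference (added illustration; the shape is inferred from the parsing
# logic above and the version numbers are hypothetical), the script expects
# custom.js to contain lines like:
#
#   const stableVersion = "v4.27.0"
#   const versionMapping = {
#       "": "v4.27.0",
#       "v4.26.1": "v4.26.1",
#   }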
| 7 | 0 |
| 585 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : int = '''vit_mae'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Optional[int]=3_072 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : List[Any]=1E-1_2 , _UpperCAmelCase : Optional[Any]=224 , _UpperCAmelCase : int=16 , _UpperCAmelCase : str=3 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : int=16 , _UpperCAmelCase : str=512 , _UpperCAmelCase : int=8 , _UpperCAmelCase : List[Any]=2_048 , _UpperCAmelCase : Optional[Any]=0.75 , _UpperCAmelCase : List[str]=False , **_UpperCAmelCase : Union[str, Any] , ):
super().__init__(**_UpperCAmelCase )
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = initializer_range
_A = layer_norm_eps
_A = image_size
_A = patch_size
_A = num_channels
_A = qkv_bias
_A = decoder_num_attention_heads
_A = decoder_hidden_size
_A = decoder_num_hidden_layers
_A = decoder_intermediate_size
_A = mask_ratio
_A = norm_pix_loss
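
# A minimal usage sketch (values illustrative; this module relies on relative
# imports, so instantiate the class from within the transformers package):
#
#     config = ViTMAEConfig(mask_ratio=0.9)
#     assert config.mask_ratio == 0.9 and config.hidden_size == 768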
| 7 | 0 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class _SCREAMING_SNAKE_CASE:
def __init__( self : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int]=1_00 , UpperCamelCase_ : Tuple=13 , UpperCamelCase_ : Dict=30 , UpperCamelCase_ : str=2 , UpperCamelCase_ : int=3 , UpperCamelCase_ : Any=True , UpperCamelCase_ : Any=True , UpperCamelCase_ : Union[str, Any]=32 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : str=37 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Union[str, Any]=10 , UpperCamelCase_ : int=0.02 , UpperCamelCase_ : List[str]=3 , UpperCamelCase_ : int=None , UpperCamelCase_ : Union[str, Any]=[0, 1, 2, 3] , ) -> str:
SCREAMING_SNAKE_CASE__ :List[str] = parent
SCREAMING_SNAKE_CASE__ :int = 1_00
SCREAMING_SNAKE_CASE__ :Optional[int] = batch_size
SCREAMING_SNAKE_CASE__ :Optional[Any] = image_size
SCREAMING_SNAKE_CASE__ :Optional[int] = patch_size
SCREAMING_SNAKE_CASE__ :Optional[int] = num_channels
SCREAMING_SNAKE_CASE__ :Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ :Optional[int] = use_labels
SCREAMING_SNAKE_CASE__ :int = hidden_size
SCREAMING_SNAKE_CASE__ :Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ :List[str] = num_attention_heads
SCREAMING_SNAKE_CASE__ :Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ :Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ :Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ :List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ :List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ :List[str] = initializer_range
SCREAMING_SNAKE_CASE__ :int = scope
SCREAMING_SNAKE_CASE__ :Optional[int] = out_indices
SCREAMING_SNAKE_CASE__ :Tuple = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ :Optional[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ :Optional[int] = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices, )
def __lowerCamelCase ( self : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :Any = BeitModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ :Dict = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :Optional[int] = BeitForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ :Union[str, Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __lowerCamelCase ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :Optional[int] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ :Union[str, Any] = BeitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ :List[str] = 1
SCREAMING_SNAKE_CASE__ :Dict = BeitForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ :Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ :List[str] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCamelCase ( self : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ :List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ :Optional[int] = BeitForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ :Union[str, Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
SCREAMING_SNAKE_CASE__ :str = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
A_ : Optional[int] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
A_ : Union[str, Any] = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ : Any = False
A_ : Any = False
A_ : List[str] = False
def __lowerCamelCase ( self : List[str] ) -> str:
SCREAMING_SNAKE_CASE__ :Tuple = BeitModelTester(self )
SCREAMING_SNAKE_CASE__ :Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def __lowerCamelCase ( self : Any ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def __lowerCamelCase ( self : str ) -> Optional[int]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowerCamelCase ( self : Dict ) -> List[str]:
pass
def __lowerCamelCase ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ :Optional[int] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def __lowerCamelCase ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ :List[Any] = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ :Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ :str = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def __lowerCamelCase ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowerCamelCase ( self : Any ) -> int:
SCREAMING_SNAKE_CASE__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def __lowerCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def __lowerCamelCase ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
def __lowerCamelCase ( self : Any ) -> List[str]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ :Any = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
SCREAMING_SNAKE_CASE__ :Any = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ :Dict = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Any = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowerCamelCase ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE__ :List[str] = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ :List[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :int = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowerCamelCase ( self : Any ) -> List[str]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ :Any = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ :Optional[Any] = model_class(config=_UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def __lowerCamelCase ( self : Optional[Any] ) -> str:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ :int = BeitModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224') if is_vision_available() else None
@slow
def __lowerCamelCase ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE__ :Tuple = prepare_img()
SCREAMING_SNAKE_CASE__ :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values.to(_UpperCAmelCase )
# prepare bool_masked_pos
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.ones((1, 1_96) , dtype=torch.bool ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ :Dict = model(pixel_values=_UpperCAmelCase , bool_masked_pos=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Any = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Tuple = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _UpperCAmelCase , atol=1e-2 ) )
@slow
def __lowerCamelCase ( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ :Tuple = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Any = self.default_image_processor
SCREAMING_SNAKE_CASE__ :Dict = prepare_img()
SCREAMING_SNAKE_CASE__ :int = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ :int = model(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Tuple = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ :List[str] = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :List[str] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
SCREAMING_SNAKE_CASE__ :Optional[Any] = 2_81
self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )
@slow
def __lowerCamelCase ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ :List[Any] = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Any = self.default_image_processor
SCREAMING_SNAKE_CASE__ :Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE__ :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ :Optional[int] = model(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Tuple = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ :Dict = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :List[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
SCREAMING_SNAKE_CASE__ :Any = 23_96
self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )
@slow
def __lowerCamelCase ( self : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :str = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE__ :Any = model.to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :str = BeitImageProcessor(do_resize=_UpperCAmelCase , size=6_40 , do_center_crop=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Optional[Any] = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
SCREAMING_SNAKE_CASE__ :Optional[Any] = Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE__ :Optional[Any] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ :Any = model(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Optional[int] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ :Union[str, Any] = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , _UpperCAmelCase )
        is_pillow_less_than_a = version.parse(PIL.__version__) < version.parse('9.0.0')
        if is_pillow_less_than_a:
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=_UpperCAmelCase , )
else:
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def __lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :int = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
SCREAMING_SNAKE_CASE__ :str = model.to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Optional[int] = BeitImageProcessor(do_resize=_UpperCAmelCase , size=6_40 , do_center_crop=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Optional[int] = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
SCREAMING_SNAKE_CASE__ :List[str] = Image.open(ds[0]['file'] )
SCREAMING_SNAKE_CASE__ :Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ :Dict = model(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :str = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE__ :Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(5_00, 3_00)] )
SCREAMING_SNAKE_CASE__ :Any = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ :Optional[Any] = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
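
# A self-contained sketch of the post-processing exercised by the last test
# above: semantic-segmentation logits of shape (batch, num_labels, h, w) are
# resized to a target size and argmax'd over the label axis. The helper name
# is illustrative, not part of the BeitImageProcessor API.
def _sketch_postprocess_segmentation(logits, target_size):
    import torch

    resized = torch.nn.functional.interpolate(
        logits, size=target_size, mode="bilinear", align_corners=False
    )
    return resized.argmax(dim=1)  # (batch, height, width) map of class ids


if __name__ == "__main__":
    import torch

    fake_logits = torch.randn(1, 150, 160, 160)
    print(_sketch_postprocess_segmentation(fake_logits, (500, 300)).shape)  # (1, 500, 300)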
| 209 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
a = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def _snake_case ( _snake_case : Optional[Any] ) -> str:
'''simple docstring'''
_A = torch.load(_snake_case , map_location='cpu' )
return sd
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Tuple=rename_keys_prefix ) -> List[str]:
'''simple docstring'''
_A = OrderedDict()
_A = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_A = key
for name_pair in rename_keys_prefix:
_A = new_key.replace(name_pair[0] , name_pair[1] )
_A = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_A = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def _snake_case ( _snake_case : List[str] , _snake_case : Dict ) -> Dict:
'''simple docstring'''
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_A = 'pretraining'
if "vcr" in checkpoint_path:
_A = {'visual_embedding_dim': 5_12}
elif "vqa_advanced" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48}
elif "vqa" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48}
elif "nlvr" in checkpoint_path:
_A = {'visual_embedding_dim': 10_24}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
_A = {'visual_embedding_dim': 5_12}
_A = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48}
_A = 'vqa_advanced'
elif "vqa" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48, 'num_labels': 31_29}
_A = 'vqa'
elif "nlvr" in checkpoint_path:
_A = {
'visual_embedding_dim': 10_24,
'num_labels': 2,
}
_A = 'nlvr'
_A = VisualBertConfig(**_snake_case )
# Load State Dict
_A = load_state_dict(_snake_case )
_A = get_new_dict(_snake_case , _snake_case )
if model_type == "pretraining":
_A = VisualBertForPreTraining(_snake_case )
elif model_type == "vqa":
_A = VisualBertForQuestionAnswering(_snake_case )
elif model_type == "nlvr":
_A = VisualBertForVisualReasoning(_snake_case )
elif model_type == "multichoice":
_A = VisualBertForMultipleChoice(_snake_case )
model.load_state_dict(_snake_case )
# Save Checkpoints
Path(_snake_case ).mkdir(exist_ok=_snake_case )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
a = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
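
# Example invocation (paths illustrative; the checkpoint file name must be one
# of ACCEPTABLE_CHECKPOINTS so the config branch above can resolve):
#
#   python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#       nlvr2_fine_tuned.th ./visualbert-nlvr2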
| 7 | 0 |
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Builds Transformer-style sinusoidal embeddings for a 1D array of timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
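
# A quick shape check (values illustrative): four timesteps embedded into an
# eight-dimensional sinusoidal table.
if __name__ == "__main__":
    example = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=8)
    print(example.shape)  # (4, 8)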
| 461 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _snake_case ( _snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
for param in module.parameters():
_A = False
def _snake_case ( ) -> Tuple:
'''simple docstring'''
_A = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_A = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def _snake_case ( _snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = plt.imshow(_snake_case )
fig.axes.get_xaxis().set_visible(_snake_case )
fig.axes.get_yaxis().set_visible(_snake_case )
plt.show()
def _snake_case ( ) -> Optional[Any]:
'''simple docstring'''
_A = datetime.now()
_A = current_time.strftime('%H:%M:%S' )
return timestamp
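
# A minimal sketch tying the helpers together (output values will vary):
if __name__ == "__main__":
    device = get_device()
    print(f"[{get_timestamp()}] running on: {device}")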
| 7 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_lowerCAmelCase = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
_lowerCAmelCase = {'''facebook/blenderbot-3B''': 128}
class lowerCAmelCase_( __lowerCAmelCase ):
'''simple docstring'''
__lowercase : str = VOCAB_FILES_NAMES
__lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = ['''input_ids''', '''attention_mask''']
__lowercase : Any = BlenderbotTokenizer
def __init__( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase="replace" ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase="<mask>" ,__UpperCAmelCase=False ,__UpperCAmelCase=True ,**__UpperCAmelCase ,) -> Dict:
super().__init__(
_UpperCAmelCase ,_UpperCAmelCase ,tokenizer_file=_UpperCAmelCase ,errors=_UpperCAmelCase ,bos_token=_UpperCAmelCase ,eos_token=_UpperCAmelCase ,sep_token=_UpperCAmelCase ,cls_token=_UpperCAmelCase ,unk_token=_UpperCAmelCase ,pad_token=_UpperCAmelCase ,mask_token=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ,trim_offsets=_UpperCAmelCase ,**_UpperCAmelCase ,)
lowerCAmelCase__ : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,_UpperCAmelCase ) != add_prefix_space:
lowerCAmelCase__ : int = getattr(_UpperCAmelCase ,pre_tok_state.pop("""type""" ) )
lowerCAmelCase__ : Tuple = add_prefix_space
lowerCAmelCase__ : Optional[int] = pre_tok_class(**_UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = add_prefix_space
lowerCAmelCase__ : int = """post_processor"""
lowerCAmelCase__ : Union[str, Any] = getattr(self.backend_tokenizer ,_UpperCAmelCase ,_UpperCAmelCase )
if tokenizer_component_instance:
lowerCAmelCase__ : str = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase__ : int = tuple(state["""sep"""] )
if "cls" in state:
lowerCAmelCase__ : int = tuple(state["""cls"""] )
lowerCAmelCase__ : List[str] = False
if state.get("""add_prefix_space""" ,_UpperCAmelCase ) != add_prefix_space:
lowerCAmelCase__ : Dict = add_prefix_space
lowerCAmelCase__ : Union[str, Any] = True
if state.get("""trim_offsets""" ,_UpperCAmelCase ) != trim_offsets:
lowerCAmelCase__ : int = trim_offsets
lowerCAmelCase__ : Dict = True
if changes_to_apply:
lowerCAmelCase__ : Tuple = getattr(_UpperCAmelCase ,state.pop("""type""" ) )
lowerCAmelCase__ : List[str] = component_class(**_UpperCAmelCase )
setattr(self.backend_tokenizer ,_UpperCAmelCase ,_UpperCAmelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : Optional[Any] = AddedToken(_UpperCAmelCase ,lstrip=_UpperCAmelCase ,rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else value
lowerCAmelCase__ : List[Any] = value
def UpperCAmelCase_ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
lowerCAmelCase__ : str = kwargs.get("""is_split_into_words""" ,_UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_UpperCAmelCase ,**_UpperCAmelCase )
def UpperCAmelCase_ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Any = kwargs.get("""is_split_into_words""" ,_UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_UpperCAmelCase ,**_UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Dict:
lowerCAmelCase__ : int = self._tokenizer.model.save(_UpperCAmelCase ,name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Dict:
lowerCAmelCase__ : Optional[int] = [self.sep_token_id]
lowerCAmelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Any:
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(_UpperCAmelCase )
lowerCAmelCase__ : Dict = """ """.join(_UpperCAmelCase )
lowerCAmelCase__ : Tuple = self.encode(_UpperCAmelCase )
if len(_UpperCAmelCase ) > self.model_max_length:
lowerCAmelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
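
# A minimal sketch of the truncation rule at the end of the conversation
# encoder above (upstream name _build_conversation_input_ids): when the
# encoded conversation exceeds model_max_length, only the most recent tokens
# are kept, e.g.
#
#     input_ids = list(range(10)); model_max_length = 4
#     input_ids[-model_max_length:]  # -> [6, 7, 8, 9]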
| 565 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = ['''image_processor''', '''tokenizer''']
UpperCAmelCase : Optional[int] = '''ViTImageProcessor'''
UpperCAmelCase : int = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Tuple , _UpperCAmelCase : int=None , _UpperCAmelCase : Tuple=None , **_UpperCAmelCase : Dict ):
_A = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
_A = kwargs.pop('feature_extractor' )
_A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : Optional[Any] , _UpperCAmelCase : int=None , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[int]=None , **_UpperCAmelCase : Union[str, Any] ):
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
_A = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None:
_A = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if images is not None:
_A = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None and images is not None:
_A = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_A = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_A = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def lowerCAmelCase_ ( self : str , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : Union[str, Any] ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def lowerCAmelCase_ ( self : Dict ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def lowerCAmelCase_ ( self : Tuple ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
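
# A usage sketch (model id illustrative; this module uses relative imports, so
# go through the transformers package):
#
#     from transformers import CLIPSegProcessor
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat", "a remote"], images=[img, img], return_tensors="pt")
#     # -> input_ids, attention_mask and pixel_values, per the text+images branch above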
| 7 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __magic_name__ ( __lowerCAmelCase ,__lowerCAmelCase ,unittest.TestCase ):
UpperCamelCase : List[str] = IFPipeline
UpperCamelCase : List[str] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
UpperCamelCase : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase : Any = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self ):
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self , __magic_name__ , __magic_name__=0 ):
"""simple docstring"""
if str(_UpperCAmelCase ).startswith('mps' ):
_lowerCAmelCase = torch.manual_seed(_UpperCAmelCase )
else:
_lowerCAmelCase = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _lowerCamelCase ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase = None
_lowerCAmelCase = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase = IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase = IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type='np' , )
_lowerCAmelCase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
_lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
_lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
_lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
_lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type='np' , )
_lowerCAmelCase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
_lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
_lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
_lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
_lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def _lowerCamelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
"""simple docstring"""
_start_torch_memory_measurement()
_lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(_UpperCAmelCase )
_lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type='np' , )
_lowerCAmelCase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
_lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
_lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
_lowerCAmelCase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(_UpperCAmelCase )
_lowerCAmelCase = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
_lowerCAmelCase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
_lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def _start_torch_memory_measurement():
    """Resets the CUDA allocator counters so peak usage can be read per test."""
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
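
# A self-contained sketch of the measurement pattern used throughout the tests
# above: reset the CUDA counters, run a workload, then read the peak
# allocation (only meaningful on a CUDA machine; the tensor here is a stand-in
# for a pipeline call).
if __name__ == "__main__":
    if torch.cuda.is_available():
        _start_torch_memory_measurement()
        _ = torch.randn(1_024, 1_024, device="cuda")
        print(f"peak bytes allocated: {torch.cuda.max_memory_allocated()}")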
| 589 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def _snake_case ( _snake_case : int ) -> datetime:
'''simple docstring'''
_A = year % 19
_A = year % 4
_A = year % 7
_A = math.floor(year / 1_00 )
_A = math.floor((13 + 8 * leap_day_inhibits) / 25 )
_A = leap_day_inhibits / 4
_A = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
_A = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
_A = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
_A = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(_snake_case , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(_snake_case , 4 , 18 )
else:
return datetime(_snake_case , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
a = '''will be''' if year > datetime.now().year else '''was'''
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
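
# Worked example for 2023: metonic_cycle = 9, days_to_add = 15 and
# days_from_phm_to_sunday = 3, so Easter falls on March 22 + 18 days,
# i.e. gauss_easter(2023) == datetime(2023, 4, 9).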
| 7 | 0 |
import re


def dna_complement(strand: str) -> str:
    """Return the complementary DNA strand.

    >>> dna_complement("GCTA")
    'CGAT'
    """
    if len(re.findall("[ATCG]", strand)) != len(strand):
        raise ValueError("Invalid Strand")
    return strand.translate(strand.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
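
# Example (illustrative): dna_complement("GTAT") returns "CATA"; a strand with
# any character outside A/T/C/G raises ValueError.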
| 488 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : int = '''gpt_bigcode'''
UpperCAmelCase : str = ['''past_key_values''']
UpperCAmelCase : Dict = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Tuple , _UpperCAmelCase : Dict=50_257 , _UpperCAmelCase : List[Any]=1_024 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : str="gelu_pytorch_tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[Any]=1E-5 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[Any]=50_256 , _UpperCAmelCase : Dict=50_256 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Any=True , **_UpperCAmelCase : Any , ):
_A = vocab_size
_A = n_positions
_A = n_embd
_A = n_layer
_A = n_head
_A = n_inner
_A = activation_function
_A = resid_pdrop
_A = embd_pdrop
_A = attn_pdrop
_A = layer_norm_epsilon
_A = initializer_range
_A = scale_attn_weights
_A = use_cache
_A = attention_softmax_in_fpaa
_A = scale_attention_softmax_in_fpaa
_A = multi_query
_A = bos_token_id
_A = eos_token_id
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
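
# A minimal sketch (values illustrative): the attribute_map above lets the
# canonical config names resolve to the GPT-2 style ones, e.g.
#
#     config = GPTBigCodeConfig(n_embd=1_024, n_head=16)
#     assert config.hidden_size == 1_024 and config.num_attention_heads == 16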
| 7 | 0 |
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal equivalent.

    >>> bin_to_octal("1111")
    '17'
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
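
# Worked example: "11010" is left-padded to "011010" and split into "011" and
# "010", which map to octal digits 3 and 2, so bin_to_octal("11010") == "32"
# (binary 11010 is decimal 26, i.e. octal 32).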
if __name__ == "__main__":
from doctest import testmod
testmod() | 483 |
"""simple docstring"""
def _snake_case ( _snake_case : str ) -> str:
'''simple docstring'''
return " ".join(
''.join(word[::-1] ) if len(_snake_case ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
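# Only words longer than four characters are reversed, so the demo above
# prints "Hey fellow warriors".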
| 7 | 0 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name)
            )
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name)
            )
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
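
# Illustration of _dense_to_one_hot above (values illustrative): labels [0, 2]
# with num_classes=3 become [[1, 0, 0], [0, 0, 1]].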
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10_000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2]
                )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
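# Example usage (illustrative; the directory and sizes are arbitrary):
#
#   data_sets = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5000)
#   batch_images, batch_labels = data_sets.train.next_batch(100)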
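# --- diffusers: KDPM2DiscreteScheduler regression tests ---------------------
# Sweeps the scheduler's config space and runs full denoising loops against
# per-device (CPU/MPS vs CUDA) reference sums and means.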
"""simple docstring"""
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
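# --- transformers: ViLT checkpoint conversion -------------------------------
# Renames keys from the original dandelin/ViLT checkpoints into the
# HuggingFace ViLT layout, verifies the converted model on a task-specific
# example, and saves the model and processor.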
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Build the (old_name, new_name) pairs that map original ViLT keys onto HuggingFace ones."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'transformer.blocks.{i}.norm1.weight', f'vilt.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm1.bias', f'vilt.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.weight', f'vilt.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'transformer.blocks.{i}.attn.proj.bias', f'vilt.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.norm2.weight', f'vilt.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'transformer.blocks.{i}.norm2.bias', f'vilt.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'transformer.blocks.{i}.mlp.fc1.weight', f'vilt.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc1.bias', f'vilt.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.weight', f'vilt.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.bias', f'vilt.encoder.layer.{i}.output.dense.bias') )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original ViLT weights into the HuggingFace ViLT structure."""
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
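# --- transformers: optimizer and learning-rate schedule tests ---------------
# Checks AdamW/Adafactor convergence on a tiny regression problem and that
# each LR schedule produces its expected curve and survives save/reload.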
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            # calculate loss, update weights with this optimizer
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8,
            beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=F'''failed for {scheduler_func} in normal scheduler''',
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=F'''failed for {scheduler_func} in save and reload''')
class LambdaScheduleWrapper:
    """Wraps an lr lambda in a picklable callable so the schedule can be saved and reloaded."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
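# --- Matrix rotation via transpose and row/column reversal ------------------
# rotate_90/180/270 are compositions of transpose(), reverse_row() and
# reverse_column(), all of which mutate the matrix in place.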
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix filled with 1..row_size**2."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix by 270 degrees counterclockwise."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
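# --- Real and reactive power from apparent power and power factor -----------
# P = S * pf and Q = S * sqrt(1 - pf**2), with pf validated to lie in [-1, 1].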
"""simple docstring"""
import math
def _snake_case ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
if (
not isinstance(_snake_case , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def _snake_case ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
if (
not isinstance(_snake_case , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
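# --- transformers: M-CTC-T feature extractor --------------------------------
# Computes Mel filter-bank (MFSC) features from raw speech and applies
# per-utterance mean/variance normalization before padding and batching.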
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """Constructs an M-CTC-T feature extractor that turns raw speech into MFSC features."""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extract MFSC features for one waveform vector (unbatched)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        # normalize over the valid (unpadded) portion of the sequence
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize and prepare one or several speech sequence(s) for the model."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"""input_features""": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("""input_features""")
        if isinstance(input_features[0], list):
            padded_inputs["""input_features"""] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("""attention_mask""")
        if attention_mask is not None:
            padded_inputs["""attention_mask"""] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["""input_features"""] = self.normalize(
                padded_inputs["""input_features"""], attention_mask=attention_mask )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
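# --- transformers: X-MOD model configuration --------------------------------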
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = '''xmod'''

    def __init__(
        self,
        vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1E-1_2, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2,
        adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True,
        languages=("en_XX",), default_language=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
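# --- transformers: RWKV model configuration ---------------------------------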
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class RwkvConfig(PretrainedConfig):
    model_type = '''rwkv'''
    attribute_map = {'''max_position_embeddings''': '''context_length'''}

    def __init__(
        self,
        vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32,
        attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5,
        bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False,
        use_cache=True, **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
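# Example (illustrative): the optional sizes fall back to values derived from
# hidden_size, as computed in __init__ above.
#
#   config = RwkvConfig(hidden_size=512, num_hidden_layers=12)
#   assert config.attention_hidden_size == 512
#   assert config.intermediate_size == 4 * 512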
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    '''tensor(bool)''': np.bool_,
    '''tensor(int8)''': np.int8,
    '''tensor(uint8)''': np.uint8,
    '''tensor(int16)''': np.int16,
    '''tensor(uint16)''': np.uint16,
    '''tensor(int32)''': np.int32,
    '''tensor(uint32)''': np.uint32,
    '''tensor(int64)''': np.int64,
    '''tensor(uint64)''': np.uint64,
    '''tensor(float16)''': np.float16,
    '''tensor(float)''': np.float32,
    '''tensor(double)''': np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.')
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir', None)
        self.latest_model_name = kwargs.get('latest_model_name', ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider')
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(F'''Provided path ({save_directory}) should be a directory, not a file''')
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options )
            kwargs['model_save_dir'] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token,
                revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs['model_save_dir'] = Path(model_cache_path).parent
            kwargs['latest_model_name'] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split('@')) == 2:
            model_id, revision = model_id.split('@')

        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir,
            force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
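# --- diffusers: ScoreSdeVePipeline tests (fast unit + slow integration) -----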
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3,
            out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = 'google/ncsnpp-church-256'
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type='numpy', generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
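# --- transformers: Speech2Text model configuration --------------------------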
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = '''speech_to_text'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=10_000, encoder_layers=12, encoder_ffn_dim=2_048, encoder_attention_heads=4,
        decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=4,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6_000,
        max_target_positions=1_024, num_conv_layers=2, conv_kernel_sizes=(5, 5),
        conv_channels=1_024, input_feat_per_channel=80, input_channels=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
                F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
                F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
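# --- transformers: TrOCR checkpoint conversion ------------------------------
# Builds a ViT-encoder / TrOCR-decoder VisionEncoderDecoderModel, maps the
# original fairseq checkpoint keys onto it, and verifies the logits against
# recorded reference slices.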
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    """Build the (old, new) key pairs that map original TrOCR weights onto the HF model."""
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""")

        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"""encoder.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original TrOCR weights into the HuggingFace VisionEncoderDecoder structure."""
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving processor to {pytorch_dump_folder_path}""")
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        """--checkpoint_url""",
        default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
        type=str,
        help="""URL to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 461 |
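# Hedged usage sketch (not part of the conversion script above): once a checkpoint has
# been converted and saved with save_pretrained, it can be used for OCR inference
# roughly as below; the local folder path and image filename are assumptions.
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

trocr_processor = TrOCRProcessor.from_pretrained("./trocr-base-handwritten")  # hypothetical dump folder
trocr_model = VisionEncoderDecoderModel.from_pretrained("./trocr-base-handwritten")
pixel_values = trocr_processor(images=Image.open("line.png").convert("RGB"), return_tensors="pt").pixel_values
generated_ids = trocr_model.generate(pixel_values)
print(trocr_processor.batch_decode(generated_ids, skip_special_tokens=True)[0])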
"""simple docstring"""
from manim import *
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = Rectangle(height=0.5 , width=0.5 )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A = Rectangle(height=0.25 , width=0.25 )
_A = [mem.copy() for i in range(6 )]
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('CPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(4 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('GPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Model' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
_A = []
_A = []
for i, rect in enumerate(_UpperCAmelCase ):
_A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
_A = [meta_mem.copy() for i in range(6 )]
_A = [meta_mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Disk' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
_A = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
_A = Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
_A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_A = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
_A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_A = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_A = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_A = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_A = a_c
_A = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
_A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait()
| 7 | 0 |
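# The scene above animates hook-based offloading: every layer's weights live on the CPU
# and are copied to the GPU only while that layer executes. A minimal sketch of the same
# idea with real accelerate utilities (model id and device choice are assumptions):
import torch
from accelerate import cpu_offload
from transformers import AutoModelForCausalLM

offloaded_model = AutoModelForCausalLM.from_pretrained("gpt2")
cpu_offload(offloaded_model, execution_device=torch.device("cuda:0"))  # attaches per-module hooks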
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 565 |
"""simple docstring"""
def or_gate( input_a : int , input_b : int ) -> int:
    '''Return 1 if at least one input is 1, mirroring a logical OR gate.'''
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate( ) -> None:
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 7 | 0 |
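# Small extension sketch (not in the original file): an n-input OR gate folded out of
# the 2-input gate above with functools.reduce.
from functools import reduce

def or_gate_n(*inputs: int) -> int:
    return reduce(or_gate, inputs)

assert or_gate_n(0, 0, 0) == 0
assert or_gate_n(0, 1, 0) == 1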
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files', [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
], )
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md', 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md', 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json', 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
_lowerCAmelCase = DatasetInfosDict.from_directory(_snake_case )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
'dataset_info', [
DatasetInfo(),
DatasetInfo(
description='foo', features=Features({'a': Value('int32' )} ), builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train'}], download_size=4_2, ),
], )
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = str(_snake_case )
dataset_info.write_to_directory(_snake_case )
_lowerCAmelCase = DatasetInfo.from_directory(_snake_case )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(_snake_case, 'dataset_info.json' ) )
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = DatasetInfo(
description='foo', citation='bar', homepage='https://foo.bar', license='CC0', features=Features({'a': Value('int32' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train', 'num_examples': 4_2}], download_checksums={}, download_size=1_3_3_7, post_processing_size=4_4_2, dataset_size=1_2_3_4, size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4, )
_lowerCAmelCase = dataset_info._to_yaml_dict()
assert sorted(_snake_case ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) )
_lowerCAmelCase = yaml.safe_dump(_snake_case )
_lowerCAmelCase = yaml.safe_load(_snake_case )
assert dataset_info_yaml_dict == reloaded
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = DatasetInfo()
_lowerCAmelCase = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict', [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo', features=Features({'a': Value('int32' )} ), builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train'}], download_size=4_2, )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=4_2 ),
'v2': DatasetInfo(dataset_size=1_3_3_7 ),
} ),
], )
def A__ ( __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = str(_snake_case )
dataset_infos_dict.write_to_directory(_snake_case )
_lowerCAmelCase = DatasetInfosDict.from_directory(_snake_case )
    # the config_name of the dataset_infos_dict takes precedence over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
_lowerCAmelCase = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_lowerCAmelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_snake_case, 'README.md' ) )
| 589 |
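# For orientation, a standalone sketch of the round trip the tests above exercise:
# DatasetInfosDict serializes into the YAML front matter of README.md and reads back.
import tempfile

from datasets.info import DatasetInfo, DatasetInfosDict

scratch_dir = tempfile.mkdtemp()
DatasetInfosDict({"default": DatasetInfo(dataset_size=42)}).write_to_directory(scratch_dir)
assert DatasetInfosDict.from_directory(scratch_dir)["default"].dataset_size == 42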
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
a = logging.getLogger(__name__)
@dataclass
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Optional[float] = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
UpperCAmelCase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Whether to SortishSamler or not.'''} )
UpperCAmelCase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
UpperCAmelCase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''whether to use adafactor'''} )
UpperCAmelCase : Optional[float] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
UpperCAmelCase : Optional[float] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
UpperCAmelCase : Optional[float] = field(default=__lowerCAmelCase , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
UpperCAmelCase : Optional[float] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
UpperCAmelCase : Optional[str] = field(
default='''linear''' , metadata={'''help''': f'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
| 7 | 0 |
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(Rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 488 |
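# Self-contained illustration of the filtering logic above, with hypothetical file
# names standing in for the git diff output and the directory arguments.
import re

dirs = ["src", "tests"]  # would come from sys.argv[1:]
pattern = re.compile(rf"^({'|'.join(dirs)}).*?\.py$")
changed = ["src/models/bert.py", "docs/index.md", "tests/test_bert.py"]
print(" ".join(f for f in changed if pattern.match(f)))  # -> src/models/bert.py tests/test_bert.py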
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 7 | 0 |
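# The module above follows the transformers lazy-import convention: _import_structure
# maps submodule names to the symbols they export, and _LazyModule defers the actual
# imports until first attribute access. A stripped-down sketch of the mechanism
# (independent of the real _LazyModule implementation):
import importlib

class TinyLazyModule:
    def __init__(self, package: str, import_structure: dict):
        self._package = package
        self._import_structure = import_structure

    def __getattr__(self, name: str):
        for submodule, exported in self._import_structure.items():
            if name in exported:
                module = importlib.import_module(f"{self._package}.{submodule}")
                return getattr(module, name)
        raise AttributeError(name)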
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass biquad filter (Audio EQ Cookbook coefficients)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peaking EQ biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)

    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)

    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
 | 483 |
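# Usage sketch for the factories above: a 1 kHz low-pass biquad at a 48 kHz sample
# rate run over a short, arbitrary test signal, assuming IIRFilter exposes a
# per-sample process() method as in the referenced audio_filters.iir_filter module.
lowpass = make_lowpass(1_000, 48_000)
test_signal = [0.0, 1.0, 0.5, -0.5, -1.0]
print([lowpass.process(sample) for sample in test_signal])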
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : UNetaDModel
UpperCAmelCase : KarrasVeScheduler
def __init__( self : Any , _UpperCAmelCase : UNetaDModel , _UpperCAmelCase : KarrasVeScheduler ):
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self : Optional[int] , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , **_UpperCAmelCase : Optional[Any] , ):
_A = self.unet.config.sample_size
_A = (batch_size, 3, img_size, img_size)
_A = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_A = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_A = self.scheduler.schedule[t]
_A = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_A , _A = self.scheduler.add_noise_to_input(_UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_A = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_A = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_A = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
_A = self.scheduler.step_correct(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , step_output.prev_sample , step_output['derivative'] , )
_A = step_output.prev_sample
_A = (sample / 2 + 0.5).clamp(0 , 1 )
_A = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_A = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
| 7 | 0 |
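# Hedged usage sketch for the pipeline above, mirroring the standard diffusers calling
# convention; the checkpoint id is taken from the diffusers docs and is an assumption.
from diffusers import KarrasVePipeline

karras_pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
sample_image = karras_pipe(num_inference_steps=50).images[0]
sample_image.save("karras_ve_sample.png")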
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class UpperCAmelCase_ ( __lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ = '''data2vec-text'''
def __init__( self : List[str] , UpperCAmelCase : List[str]=3_0522 , UpperCAmelCase : str=768 , UpperCAmelCase : List[Any]=12 , UpperCAmelCase : Optional[Any]=12 , UpperCAmelCase : Union[str, Any]=3072 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : str=512 , UpperCAmelCase : Dict=2 , UpperCAmelCase : Dict=0.0_2 , UpperCAmelCase : str=1e-12 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : Any=2 , UpperCAmelCase : Optional[Any]="absolute" , UpperCAmelCase : Dict=True , UpperCAmelCase : str=None , **UpperCAmelCase : Union[str, Any] , ) -> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
lowercase : List[str] =vocab_size
lowercase : int =hidden_size
lowercase : Union[str, Any] =num_hidden_layers
lowercase : Optional[int] =num_attention_heads
lowercase : Union[str, Any] =hidden_act
lowercase : str =intermediate_size
lowercase : Dict =hidden_dropout_prob
lowercase : List[Any] =attention_probs_dropout_prob
lowercase : Optional[int] =max_position_embeddings
lowercase : List[str] =type_vocab_size
lowercase : List[Any] =initializer_range
lowercase : int =layer_norm_eps
lowercase : int =position_embedding_type
lowercase : Any =use_cache
lowercase : Union[str, Any] =classifier_dropout
class UpperCAmelCase_ ( __lowerCAmelCase ):
"""simple docstring"""
@property
def A__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if self.task == "multiple-choice":
lowercase : Optional[int] ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase : Dict ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 94 |
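# For orientation: in the released library the config above corresponds to
# Data2VecTextConfig. A minimal sketch of building a small model from it
# (the sizes are arbitrary assumptions):
from transformers import Data2VecTextConfig, Data2VecTextModel

small_config = Data2VecTextConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=512)
small_model = Data2VecTextModel(small_config)
print(small_model.config.hidden_size)  # 256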
"""simple docstring"""
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = None
_A = None
_A = graph
self._normalize_graph(_UpperCAmelCase , _UpperCAmelCase )
_A = len(_UpperCAmelCase )
_A = None
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ):
if sources is int:
_A = [sources]
if sinks is int:
_A = [sinks]
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_UpperCAmelCase ) > 1 or len(_UpperCAmelCase ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] ):
_A = algorithm(self )
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Union[str, Any] ):
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A = flow_network.graph
_A = False
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
self._algorithm()
_A = True
def lowerCAmelCase_ ( self : int ):
pass
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , _UpperCAmelCase : Any ):
super().__init__(_UpperCAmelCase )
# use this to save your result
_A = -1
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : List[Any] ):
super().__init__(_UpperCAmelCase )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def lowerCAmelCase_ ( self : Dict ):
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(_UpperCAmelCase ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(_UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_UpperCAmelCase ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Any ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_UpperCAmelCase , _UpperCAmelCase )
self.relabel(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple ):
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int ):
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a = [0]
a = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
| 7 | 0 |
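# Worked check for the sample run above: with entrance 0 and exit 3, the only
# augmenting path in the 4-node network is 0 -> 1 -> 2 -> 3 with edge capacities
# (7, 6, 8), so the printed maximum flow should be min(7, 6, 8) = 6.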
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCamelCase ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = GPTSanJapaneseTokenizer
lowerCAmelCase = False
lowerCAmelCase = {'''do_clean_text''': False, '''add_prefix_space''': False}
def _UpperCAmelCase ( self ) -> Any:
super().setUp()
# fmt: off
A = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
A = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
A = {"""unk_token""": """<unk>"""}
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(_UpperCAmelCase ) )
def _UpperCAmelCase ( self , **a__ ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _UpperCAmelCase ( self , a__ ) -> str:
A = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
A = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def _UpperCAmelCase ( self , a__ ) -> List[Any]:
A , A = self.get_input_output_texts(_UpperCAmelCase )
A = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
A = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
return text, ids
def _UpperCAmelCase ( self ) -> Any:
pass # TODO add if relevant
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass # TODO add if relevant
def _UpperCAmelCase ( self ) -> str:
pass # TODO add if relevant
def _UpperCAmelCase ( self ) -> Union[str, Any]:
A = self.get_tokenizer()
# Testing tokenization
A = """こんにちは、世界。 こんばんは、㔺界。"""
A = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
A = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids without special tokens
A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
A = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids with special tokens
A = tokens + [tokenizer.unk_token]
A = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
A = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
A = self.get_tokenizer()
# Testing tokenization
A = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
A = """こんにちは、、、、世界。こんばんは、、、、世界。"""
A = tokenizer.encode(_UpperCAmelCase )
A = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
A = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
A = """こんにちは、世界。"""
A = """こんばんは、㔺界。😀"""
A = """こんにちは、世界。こんばんは、世界。😀"""
A = tokenizer.encode(prefix_text + input_text )
A = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
A = tokenizer.encode(_UpperCAmelCase , prefix_text=_UpperCAmelCase )
A = tokenizer.decode(_UpperCAmelCase )
A = tokenizer.decode(_UpperCAmelCase )
A = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
A = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
A = """こんにちは、世界。"""
A = """こんばんは、㔺界。😀"""
A = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
A = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
A = [1] + [0] * (len_prefix + len_text + 1)
A = [1] * (len_prefix + len_text + 1) + [0]
A = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
A = tokenizer(prefix_text + input_text ).token_type_ids
A = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
A = tokenizer(_UpperCAmelCase , prefix_text=_UpperCAmelCase ).token_type_ids
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def _UpperCAmelCase ( self ) -> Tuple:
A = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
A = tokenizer.encode("""あンいワ""" )
A = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
A = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(_UpperCAmelCase ) , tokenizer.decode(_UpperCAmelCase ) )
self.assertEqual(tokenizer.decode(_UpperCAmelCase ) , tokenizer.decode(_UpperCAmelCase ) )
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def _UpperCAmelCase ( self ) -> List[str]:
A = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
A = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
A = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase )
A = tokenizer.batch_encode_plus(_UpperCAmelCase , padding=_UpperCAmelCase )
# fmt: off
A = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
A = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
A = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , _UpperCAmelCase )
self.assertListEqual(x_token.token_type_ids , _UpperCAmelCase )
self.assertListEqual(x_token.attention_mask , _UpperCAmelCase )
self.assertListEqual(x_token_a.input_ids , _UpperCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , _UpperCAmelCase )
self.assertListEqual(x_token_a.attention_mask , _UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def _UpperCAmelCase ( self ) -> List[str]:
# tokenizer has no padding token
pass
| 641 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = SpeechTaTokenizer
UpperCAmelCase : Tuple = False
UpperCAmelCase : Optional[int] = True
def lowerCAmelCase_ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
_A = SpeechTaTokenizer(_UpperCAmelCase )
_A = AddedToken('<mask>' , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )
_A = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple ):
_A = 'this is a test'
_A = 'this is a test'
return input_text, output_text
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict=20 , _UpperCAmelCase : str=5 ):
_A , _A = self.get_input_output_texts(_UpperCAmelCase )
_A = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
_A = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
return text, ids
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = '<pad>'
_A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(_UpperCAmelCase ) , 81 )
def lowerCAmelCase_ ( self : Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase_ ( self : Any ):
_A = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = tokenizer.vocab_size
_A = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_A = ['aaaaa bbbbbb', 'cccccccccdddddddd']
_A = tokenizer.add_tokens(_UpperCAmelCase )
_A = tokenizer.vocab_size
_A = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , all_size + len(_UpperCAmelCase ) )
_A = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=_UpperCAmelCase )
self.assertGreaterEqual(len(_UpperCAmelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_A = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
_A = tokenizer.add_special_tokens(_UpperCAmelCase )
_A = tokenizer.vocab_size
_A = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , all_size_a + len(_UpperCAmelCase ) )
_A = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=_UpperCAmelCase )
self.assertGreaterEqual(len(_UpperCAmelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase_ ( self : str ):
pass
def lowerCAmelCase_ ( self : Any ):
pass
def lowerCAmelCase_ ( self : Dict ):
_A = self.get_tokenizer()
_A = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(_UpperCAmelCase , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
_A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_UpperCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
_A = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
# fmt: off
self.assertListEqual(_UpperCAmelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_A = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
# Use custom sequence because this tokenizer does not handle numbers.
_A = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
_A = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=_UpperCAmelCase , )
| 7 | 0 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase_ : Dict = pytest.mark.integration
UpperCAmelCase_ : Optional[Any] = {'comet'}
UpperCAmelCase_ : Any = importlib.util.find_spec('fairseq') is not None
UpperCAmelCase_ : Tuple = {'code_eval'}
UpperCAmelCase_ : int = os.name == 'nt'
UpperCAmelCase_ : Optional[Any] = {'bertscore', 'frugalscore', 'perplexity'}
UpperCAmelCase_ : List[str] = importlib.util.find_spec('transformers') is not None
def _lowercase ( UpperCamelCase__ : List[Any] ):
@wraps(_snake_case )
def wrapper(self : List[str], UpperCamelCase__ : str ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('"test requires Fairseq"' )
else:
test_case(self, _snake_case )
return wrapper
def _lowercase ( UpperCamelCase__ : Any ):
@wraps(_snake_case )
def wrapper(self : Union[str, Any], UpperCamelCase__ : Optional[int] ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('"test requires transformers"' )
else:
test_case(self, _snake_case )
return wrapper
def _lowercase ( UpperCamelCase__ : Union[str, Any] ):
@wraps(_snake_case )
def wrapper(self : Union[str, Any], UpperCamelCase__ : List[str] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('"test not supported on Windows"' )
else:
test_case(self, _snake_case )
return wrapper
def _lowercase ( ):
__A : Any = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@local
class _lowerCamelCase ( parameterized.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = {}
__lowercase : Optional[Any] = None
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' )
def snake_case__ ( self , __lowercase ):
"""simple docstring"""
__A : List[Any] = '[...]'
__A : Union[str, Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , _UpperCAmelCase ) ).module_path )
__A : Dict = datasets.load.import_main_class(metric_module.__name__ , dataset=_UpperCAmelCase )
# check parameters
__A : Tuple = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(_UpperCAmelCase , metric_module.__name__ ):
with self.use_local_metrics():
try:
__A : Union[str, Any] = doctest.testmod(_UpperCAmelCase , verbose=_UpperCAmelCase , raise_on_error=_UpperCAmelCase )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def snake_case__ ( self , __lowercase ):
"""simple docstring"""
__A : Tuple = '[...]'
__A : Optional[int] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , _UpperCAmelCase ) ).module_path )
# run doctest
with self.use_local_metrics():
__A : List[str] = doctest.testmod(_UpperCAmelCase , verbose=_UpperCAmelCase , raise_on_error=_UpperCAmelCase )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def snake_case__ ( self , __lowercase , __lowercase ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](_UpperCAmelCase ):
yield
else:
yield
@contextmanager
def snake_case__ ( self ):
"""simple docstring"""
def load_local_metric(__lowercase , *__lowercase , **__lowercase ):
return load_metric(os.path.join('metrics' , _UpperCAmelCase ) , *_UpperCAmelCase , **_UpperCAmelCase )
with patch('datasets.load_metric' ) as mock_load_metric:
__A : Tuple = load_local_metric
yield
@classmethod
def snake_case__ ( cls , __lowercase ):
"""simple docstring"""
def wrapper(__lowercase ):
__A : Tuple = contextmanager(_UpperCAmelCase )
__A : Dict = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt' )
def _lowercase ( UpperCamelCase__ : int ):
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('sv', '', '' ) # handle pytest cli flags
class _lowerCamelCase ( __lowerCAmelCase ):
'''simple docstring'''
def snake_case__ ( self , __lowercase ):
"""simple docstring"""
assert len(input_dict['input_ids'] ) == 2
return np.array([1.0_3, 1.0_4] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('bleurt.score._create_predictor' ) as mock_create_predictor:
__A : Tuple = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore' )
def _lowercase ( UpperCamelCase__ : Tuple ):
import torch
def bert_cos_score_idf(UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[Any], *UpperCamelCase__ : Union[str, Any], **UpperCamelCase__ : Optional[Any] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(_snake_case ) )
    # mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('bert_score.scorer.get_model' ), patch(
'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf:
__A : Optional[int] = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('comet' )
def _lowercase ( UpperCamelCase__ : List[str] ):
def load_from_checkpoint(UpperCamelCase__ : str ):
class _lowerCamelCase :
'''simple docstring'''
def snake_case__ ( self , __lowercase , *__lowercase , **__lowercase ):
"""simple docstring"""
assert len(_UpperCAmelCase ) == 2
__A : Optional[int] = [0.1_9, 0.9_2]
return scores, sum(_UpperCAmelCase ) / len(_UpperCAmelCase )
return Model()
    # mock download_model and load_from_checkpoint, which would otherwise download a real model
with patch('comet.download_model' ) as mock_download_model:
__A : List[str] = None
with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint:
__A : Optional[int] = load_from_checkpoint
yield
def _lowercase ( ):
__A : str = load_metric(os.path.join('metrics', 'seqeval' ) )
__A : Tuple = 'ERROR'
__A : Tuple = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(_snake_case, match=re.escape(_snake_case ) ):
metric.compute(predictions=[], references=[], scheme=_snake_case )
| 365 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 7 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 102 |
"""simple docstring"""
import argparse
JS_PATH = '''docs/source/_static/js/custom.js'''
def update_custom_js ( version : str ) -> None:
    '''simple docstring'''
    with open(JS_PATH , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion =' ):
        index += 1
    lines[index] = F'''const stableVersion = "v{version}"\n'''
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('}' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F'''    "v{version}": "v{version}",\n'''
    with open(JS_PATH , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--version''', help='''Release version.''')
    args = parser.parse_args()
    update_custom_js(args.version)
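# For reference, the script above assumes `custom.js` contains a block shaped roughly like
# this (reconstructed from the `startswith` checks, not copied from the real file):
#
#   const stableVersion = "v4.30.0"
#   const versionMapping = {
#       "": "doc",
#       "v4.29.0": "v4.29.0",
#   }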
| 7 | 0 |
'''simple docstring'''
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field ( input_text , convert_value=None , default=None , error_message=None ):
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _ask_options ( input_text , options=[] , convert_value=None , default_choice=0 ):
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment ( value ):
    value = int(value )
    return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def _convert_distributed_mode ( value ):
    value = int(value )
    return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def _convert_dynamo_backend ( value ):
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision ( value ):
    value = int(value )
    return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def _convert_sagemaker_distributed_mode ( value ):
    value = int(value )
    return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def _convert_yes_no_to_bool ( value ):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter ( argparse.RawDescriptionHelpFormatter ):
    '''simple docstring'''
    def _format_usage( self , usage , actions , groups , prefix ) -> str:
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace("""<command> [<args>] """ , """""" )
        return usage
| 585 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''vit_mae'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2_048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
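# A short usage sketch (illustrative values): every argument falls back to the defaults
# above, so a config only needs the deviations spelled out.
#
#   config = ViTMAEConfig(mask_ratio=0.6, norm_pix_loss=True)
#   assert config.hidden_size == 768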
| 7 | 0 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node( self , node ) -> None:
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
    def construct_mapping( self , node , deep=False ):
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme ( readme_content : str ) -> Tuple[Optional[str], str]:
    '''simple docstring'''
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index('---' ) + 1
        yamlblock = '\n'.join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
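# Example behaviour (illustrative input): a README with a YAML front-matter block is
# split into the block body and the remaining markdown.
#
#   yaml_block, content = _split_yaml_from_readme('---\nlicense: mit\n---\n# Title')
#   # yaml_block == 'license: mit', content == '# Title'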
class DatasetMetadata ( dict ):
    _FIELDS_WITH_DASHES = {'''train_eval_index'''} # train-eval-index in the YAML metadata
    @classmethod
    def from_readme( cls , path ) -> "DatasetMetadata":
        with open(path , encoding='utf-8' ) as readme_file:
            yaml_string , _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme( self , path ) -> None:
        if path.exists():
            with open(path , encoding='utf-8' ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content )
        with open(path , 'w' , encoding='utf-8' ) as readme_file:
            readme_file.write(full_content )
    def _to_readme( self , readme_content=None ) -> str:
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = '---\n' + self.to_yaml_string() + '---\n' + content
        else:
            full_content = '---\n' + self.to_yaml_string() + '---\n'
        return full_content
    @classmethod
    def from_yaml_string( cls , string ) -> "DatasetMetadata":
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string( self ) -> str:
        return yaml.safe_dump(
            {
                (key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding='utf-8' , ).decode('utf-8' )
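# Round-trip sketch (illustrative): keys with dashes in the YAML map onto underscored
# attributes and back.
#
#   metadata = DatasetMetadata.from_yaml_string('train-eval-index:\n- config: default\n')
#   assert 'train_eval_index' in metadata
#   print(metadata.to_yaml_string())  # emits 'train-eval-index: ...' again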
UpperCamelCase_ = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
    ap.add_argument('''readme_filepath''')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 209 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict ( checkpoint_path : str ) -> OrderedDict:
    '''simple docstring'''
    sd = torch.load(checkpoint_path , map_location='cpu' )
    return sd
def get_new_dict ( d , config , rename_keys_prefix=rename_keys_prefix ) -> OrderedDict:
    '''simple docstring'''
    new_d = OrderedDict()
    new_d['visual_bert.embeddings.position_ids'] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`; it was added separately
            new_d['cls.predictions.decoder.bias'] = new_d['cls.predictions.bias']
    return new_d
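# Illustrative key rewrite (hypothetical checkpoint key): with the rename pairs above,
#   'bert.bert.encoder.layer.0.attention.self.query.weight'
# becomes
#   'visual_bert.encoder.layer.0.attention.self.query.weight'
# because the ('bert.bert', 'visual_bert') pair fires first.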
@torch.no_grad()
def convert_visual_bert_checkpoint ( checkpoint_path : str , pytorch_dump_folder_path : str ) -> None:
    '''simple docstring'''
    assert (
        checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
    ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
    # Get Config
    if "pre" in checkpoint_path:
        model_type = 'pretraining'
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 5_12}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 20_48}
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 20_48}
        elif "nlvr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 10_24}
        else:
            raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
    else:
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 5_12}
            model_type = 'multichoice'
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 20_48}
            model_type = 'vqa_advanced'
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 20_48, 'num_labels': 31_29}
            model_type = 'vqa'
        elif "nlvr" in checkpoint_path:
            config_params = {
                'visual_embedding_dim': 10_24,
                'num_labels': 2,
            }
            model_type = 'nlvr'
    config = VisualBertConfig(**config_params )
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 7 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vision_encoder_decoder"""] = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vision_encoder_decoder"""] = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_vision_encoder_decoder"""] = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 461 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze ( module ) -> None:
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
def get_device ( ) -> str:
    '''simple docstring'''
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
    if device == "mps":
        print(
            'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
            ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
            ' with generations.' )
    return device
def show_image ( image ) -> None:
    '''simple docstring'''
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp ( ) -> str:
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime('%H:%M:%S' )
    return timestamp
| 7 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    '''simple docstring'''
    def __init__( self , initial_capacity = 6 ) -> None:
        self.front = None
        self.rear = None
        self.create_linked_list(initial_capacity )
    def create_linked_list( self , initial_capacity ) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self ) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first( self ):
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue( self , data ) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self ):
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self ) -> None:
        if self.is_empty():
            raise Exception("""Empty Queue""" )
    def check_is_full( self ) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("""Full Queue""" )
class Node:
    '''simple docstring'''
    def __init__( self ) -> None:
        self.data = None
        self.next = None
        self.prev = None
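# A short usage sketch: capacity is fixed at construction, and enqueueing past capacity
# raises "Full Queue".
#
#   queue = CircularQueueLinkedList(initial_capacity=2)
#   queue.enqueue('a')
#   queue.enqueue('b')
#   queue.dequeue()   # -> 'a'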
if __name__ == "__main__":
import doctest
doctest.testmod()
| 565 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , visual_prompt=None , images=None , return_tensors=None , **kwargs ):
        if text is None and visual_prompt is None and images is None:
            raise ValueError('You have to specify either text, visual prompt or images.' )
        if text is not None and visual_prompt is not None:
            raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if visual_prompt is not None and images is not None:
            encoding = {
                'pixel_values': image_features.pixel_values,
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                'conditional_pixel_values': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
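# A minimal usage sketch (the pretrained-loading entry point comes from ProcessorMixin,
# not from this file; checkpoint name shown for illustration):
#
#   processor = CLIPSegProcessor.from_pretrained('CIDAS/clipseg-rd64-refined')
#   inputs = processor(text=['a cat'], images=[image], return_tensors='pt')
#   # -> encoding with 'input_ids', 'attention_mask' and 'pixel_values'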
| 7 | 0 |
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays ( nums1, nums2 ):
    """simple docstring"""
    all_numbers = sorted(nums1 + nums2 )
    div , mod = divmod(len(all_numbers ), 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
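# Worked examples (computed by hand):
#   median_of_two_arrays([1, 3], [2])    -> 2     (odd total length: the middle element)
#   median_of_two_arrays([1, 2], [3, 4]) -> 2.5   (even: mean of the middle pair)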
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("""Enter the elements of first array: """).split()]
    array_2 = [float(x) for x in input("""Enter the elements of second array: """).split()]
    print(f'The median of two arrays is: {median_of_two_arrays(array_1, array_2)}')
| 589 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter ( year : int ) -> datetime:
    '''simple docstring'''
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 1_00 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
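# Spot checks (worked through the formula above; dates match the Gregorian computus):
#   gauss_easter(2021) -> datetime(2021, 4, 4)   # 3/22 + 7 + 6 days
#   gauss_easter(2023) -> datetime(2023, 4, 9)   # 3/22 + 15 + 3 days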
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
        tense = '''will be''' if year > datetime.now().year else '''was'''
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 7 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class snake_case_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Tuple , __magic_name__ : List[Any] , __magic_name__ : str=13 , __magic_name__ : Dict=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Dict=False , __magic_name__ : List[str]=True , __magic_name__ : Tuple=99 , __magic_name__ : Union[str, Any]=32 , __magic_name__ : Tuple=5 , __magic_name__ : str=4 , __magic_name__ : Optional[int]=64 , __magic_name__ : Any="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Union[str, Any]=512 , __magic_name__ : Optional[int]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Dict=0.02 , __magic_name__ : Tuple=3 , __magic_name__ : List[str]=4 , __magic_name__ : List[Any]=None , __magic_name__ : str=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Any=2 , __magic_name__ : List[str]=2 , __magic_name__ : Dict=4 , __magic_name__ : str=1 , ) -> Dict:
lowerCamelCase_ : Tuple = parent
lowerCamelCase_ : List[Any] = batch_size
lowerCamelCase_ : Optional[int] = seq_length
lowerCamelCase_ : Dict = is_training
lowerCamelCase_ : List[Any] = use_input_mask
lowerCamelCase_ : List[str] = use_token_type_ids
lowerCamelCase_ : int = use_labels
lowerCamelCase_ : int = vocab_size
lowerCamelCase_ : List[str] = hidden_size
lowerCamelCase_ : List[str] = num_hidden_layers
lowerCamelCase_ : Dict = num_attention_heads
lowerCamelCase_ : Any = intermediate_size
lowerCamelCase_ : Tuple = hidden_act
lowerCamelCase_ : int = hidden_dropout_prob
lowerCamelCase_ : List[str] = attention_probs_dropout_prob
lowerCamelCase_ : Any = max_position_embeddings
lowerCamelCase_ : str = type_vocab_size
lowerCamelCase_ : List[Any] = type_sequence_label_size
lowerCamelCase_ : Dict = initializer_range
lowerCamelCase_ : Optional[int] = num_labels
lowerCamelCase_ : int = num_choices
lowerCamelCase_ : int = scope
lowerCamelCase_ : List[Any] = q_groups
lowerCamelCase_ : Tuple = k_groups
lowerCamelCase_ : Union[str, Any] = v_groups
lowerCamelCase_ : Any = post_attention_groups
lowerCamelCase_ : Dict = intermediate_groups
lowerCamelCase_ : Dict = output_groups
def __SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
lowerCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : str = None
if self.use_input_mask:
lowerCamelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ : str = None
lowerCamelCase_ : List[Any] = None
lowerCamelCase_ : Optional[Any] = None
if self.use_labels:
lowerCamelCase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ : Union[str, Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
lowerCamelCase_ : List[str] = SqueezeBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowerCamelCase_ : Union[str, Any] = model(_UpperCAmelCase , _UpperCAmelCase )
lowerCamelCase_ : Any = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] ) -> Optional[int]:
lowerCamelCase_ : Dict = SqueezeBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowerCamelCase_ : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : str ) -> Tuple:
lowerCamelCase_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowerCamelCase_ : List[Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : List[Any] , __magic_name__ : int ) -> Optional[int]:
lowerCamelCase_ : Dict = self.num_labels
lowerCamelCase_ : List[str] = SqueezeBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowerCamelCase_ : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] ) -> Optional[int]:
lowerCamelCase_ : Optional[int] = self.num_labels
lowerCamelCase_ : int = SqueezeBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowerCamelCase_ : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : int ) -> Optional[Any]:
lowerCamelCase_ : Tuple = self.num_choices
lowerCamelCase_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowerCamelCase_ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ : Any = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class snake_case_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = True
lowerCamelCase = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
lowerCamelCase_ : int = SqueezeBertModelTester(self )
lowerCamelCase_ : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , dim=37 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
lowerCamelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_UpperCAmelCase )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : Tuple = SqueezeBertModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
lowerCamelCase_ : Any = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
lowerCamelCase_ : Tuple = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
lowerCamelCase_ : List[str] = model(_UpperCAmelCase )[0]
lowerCamelCase_ : Tuple = torch.Size((1, 3) )
self.assertEqual(output.shape , _UpperCAmelCase )
lowerCamelCase_ : Any = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-4 ) )
| 488 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class GPTBigCodeConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''gpt_bigcode'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , vocab_size=50_257 , n_positions=1_024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 7 | 0 |
import random
def partition ( a , left_index , right_index ):
    """simple docstring"""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1 , right_index ):
        if a[j] < pivot:
            a[i] , a[j] = a[j], a[i]
            i += 1
    a[left_index] , a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def quick_sort_random ( a , left , right ):
    """simple docstring"""
    if left < right:
        pivot = random.randint(left , right - 1 )
        a[left] , a[pivot] = (
            a[pivot],
            a[left],
        ) # switches the pivot with the left most bound
        pivot_index = partition(a , left , right )
        quick_sort_random(
            a , left , pivot_index ) # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a , pivot_index + 1 , right ) # recursive quicksort to the right of the pivot point
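# In-place usage sketch: the right bound is exclusive, matching the range() in partition.
#
#   data = [5, 2, 9, 1]
#   quick_sort_random(data, 0, len(data))
#   # data is now [1, 2, 5, 9]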
def main ( ):
    """simple docstring"""
    user_input = input("""Enter numbers separated by a comma:\n""" ).strip()
    unsorted = [int(item ) for item in user_input.split(""",""" )]
    quick_sort_random(unsorted , 0 , len(unsorted ) )
    print(unsorted )
if __name__ == "__main__":
    main()
| 483 |
"""simple docstring"""
def reverse_long_words ( sentence : str ) -> str:
    '''simple docstring'''
    return " ".join(
        ''.join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
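# Example (only words longer than four characters are reversed):
#   reverse_long_words('Hey wollef sroirraw') -> 'Hey fellow warriors'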
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 7 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : Any , ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Union[str, Any] =13
lowercase : Dict =7
lowercase : Optional[Any] =True
lowercase : Dict =True
lowercase : Optional[int] =True
lowercase : int =99
lowercase : int =32
lowercase : int =2
lowercase : Union[str, Any] =4
lowercase : Dict =37
lowercase : Optional[int] ='''gelu'''
lowercase : int =0.1
lowercase : Optional[int] =0.1
lowercase : int =512
lowercase : List[Any] =16
lowercase : Any =2
lowercase : int =0.0_2
lowercase : Any =3
lowercase : Dict =4
lowercase : Union[str, Any] =None
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Dict =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Optional[Any] =None
lowercase : int =None
lowercase : Optional[Any] =None
if self.use_labels:
lowercase : str =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple =ids_tensor([self.batch_size] , self.num_choices )
lowercase : str =EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def A__ ( self : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase : List[str] =TFEsmModel(config=_UpperCAmelCase )
lowercase : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Tuple =model(_UpperCAmelCase )
lowercase : List[Any] =[input_ids, input_mask]
lowercase : Dict =model(_UpperCAmelCase )
lowercase : Optional[int] =model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , ) -> Any:
'''simple docstring'''
lowercase : int =True
lowercase : Any =TFEsmModel(config=_UpperCAmelCase )
lowercase : int ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
lowercase : str =model(_UpperCAmelCase )
lowercase : str =[input_ids, input_mask]
lowercase : Optional[Any] =model(_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase )
# Also check the case where encoder outputs are not passed
lowercase : List[Any] =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] =TFEsmForMaskedLM(config=_UpperCAmelCase )
lowercase : Any =model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Union[str, Any] =TFEsmForTokenClassification(config=_UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : List[str] =model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : str =TFEsmModelTester(self )
lowercase : Optional[Any] =ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def A__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_UpperCAmelCase )
def A__ ( self : List[Any] ) -> int:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def A__ ( self : Dict ) -> Tuple:
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def A__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[int] =TFEsmModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip('''Protein models do not support embedding resizing.''' )
def A__ ( self : str ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('''Protein models do not support embedding resizing.''' )
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
pass
def A__ ( self : Dict ) -> int:
'''simple docstring'''
lowercase , lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Any =model_class(_UpperCAmelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
lowercase : Optional[int] =model.get_bias()
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
for k, v in name.items():
assert isinstance(_UpperCAmelCase , tf.Variable )
else:
lowercase : Union[str, Any] =model.get_output_embeddings()
assert x is None
lowercase : Dict =model.get_bias()
assert name is None
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
lowercase : Tuple =TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
lowercase : int =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : Union[str, Any] =model(_UpperCAmelCase )[0]
lowercase : str =[1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _UpperCAmelCase )
# compare the actual values for a slice.
lowercase : Dict =tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
lowercase : int =tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase : List[Any] =model(_UpperCAmelCase )[0]
# compare the actual values for a slice.
lowercase : List[Any] =tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 94 |
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = (KDPMaDiscreteScheduler,)
UpperCAmelCase : Any = 10
def lowerCAmelCase_ ( self : Dict , **_UpperCAmelCase : Optional[Any] ):
_A = {
'num_train_timesteps': 1_100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**_UpperCAmelCase )
return config
def lowerCAmelCase_ ( self : Any ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(prediction_type='v_prediction' )
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
_A = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = output.prev_sample
_A = torch.sum(torch.abs(_UpperCAmelCase ) )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4E-0_7 ) < 1E-2
assert abs(result_mean.item() - 6.1_1_1_2E-1_0 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2E-0_7 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def lowerCAmelCase_ ( self : Optional[Any] ):
if torch_device == "mps":
return
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
_A = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = output.prev_sample
_A = torch.sum(torch.abs(_UpperCAmelCase ) )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def lowerCAmelCase_ ( self : Any ):
if torch_device == "mps":
return
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCAmelCase )
_A = self.dummy_model()
_A = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = output.prev_sample
_A = torch.sum(torch.abs(_UpperCAmelCase ) )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
if str(_UpperCAmelCase ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
| 7 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def _lowerCAmelCase ( UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any]=10 ) -> Optional[int]:
"""simple docstring"""
A = []
for _ in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def _lowerCAmelCase ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any]=10 ) -> List[str]:
"""simple docstring"""
A = []
for step in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(_snake_case , """schedule.bin""" )
torch.save(scheduler.state_dict() , _snake_case )
A = torch.load(_snake_case )
scheduler.load_state_dict(_snake_case )
return lrs
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self , a__ , a__ , a__ ) -> Union[str, Any]:
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for a, b in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertAlmostEqual(_UpperCAmelCase , _UpperCAmelCase , delta=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> str:
A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_UpperCAmelCase )
A = torch.tensor([0.4, 0.2, -0.5] )
A = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
A = criterion(_UpperCAmelCase , _UpperCAmelCase )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_UpperCAmelCase )
A = torch.tensor([0.4, 0.2, -0.5] )
A = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_UpperCAmelCase , weight_decay=0.0 , relative_step=_UpperCAmelCase , scale_parameter=_UpperCAmelCase , warmup_init=_UpperCAmelCase , )
for _ in range(1000 ):
A = criterion(_UpperCAmelCase , _UpperCAmelCase )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
lowerCAmelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowerCAmelCase = 1_0
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__=None ) -> List[str]:
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for a, b in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertAlmostEqual(_UpperCAmelCase , _UpperCAmelCase , delta=_UpperCAmelCase , msg=_UpperCAmelCase )
def _UpperCAmelCase ( self ) -> str:
A = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
        # scheduler dict format
# function: (sched_args_dict, expected_learning_rates)
A = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1e-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
A , A = data
A = scheduler_func(self.optimizer , **_UpperCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
A = unwrap_schedule(_UpperCAmelCase , self.num_steps )
self.assertListAlmostEqual(
_UpperCAmelCase , _UpperCAmelCase , tol=1e-2 , msg=f'failed for {scheduler_func} in normal scheduler' , )
A = scheduler_func(self.optimizer , **_UpperCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(_UpperCAmelCase ) # wrap to test picklability of the schedule
A = unwrap_and_save_reload_schedule(_UpperCAmelCase , self.num_steps )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase , msg=f'failed for {scheduler_func} in save and reload' )
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , a__ ) -> int:
A = fn
def __call__( self , *a__ , **a__ ) -> Any:
return self.fn(*_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def _UpperCAmelCase ( self , a__ ) -> Tuple:
A = list(map(self , scheduler.lr_lambdas ) )
| 641 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """Record the learning rate over `num_steps` scheduler steps."""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Same as `unwrap_schedule`, but save and reload the scheduler state halfway through."""
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            # w should converge toward `target` under plain AdamW
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1_000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Wraps a schedule function so that a LambdaLR schedule stays picklable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
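

# --- Illustrative usage sketch (added for clarity; not part of the original test
# file). It wires AdamW to a warmup schedule the way the tests above exercise it;
# the model, sizes, and step counts are arbitrary placeholders.
def _example_training_loop():
    model = nn.Linear(4, 4)
    optimizer = AdamW(model.parameters(), lr=1e-3)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(100):
        loss = model(torch.randn(2, 4)).sum()
        loss.backward()
        optimizer.step()
        scheduler.step()  # advance the learning-rate schedule once per optimizer step
        optimizer.zero_grad()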
"""Tests for the Flax auto model mapping (FlaxAutoModel)."""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)
    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
"""simple docstring"""
import math
def _snake_case ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
if (
not isinstance(_snake_case , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def _snake_case ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
if (
not isinstance(_snake_case , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
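
# --- Illustrative usage sketch (added; not part of the original file) ---
# For an apparent power of 100 VA at power factor 0.9 (arbitrary example values):
#   real_power(100, 0.9)      -> 90.0 W
#   reactive_power(100, 0.9)  -> ~43.59 VAR, i.e. 100 * sqrt(1 - 0.9**2)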
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05,
                num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000,
            clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64), attention_head_dim=(2, 4),
            class_embed_type="projection",
            # the image embeddings concatenated with their noised counterparts
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
            prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False,
        adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True,
        ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
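
# --- Illustrative usage sketch (added; not part of the original file) ---
#   config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
#   config.adapter_reduction_factor  # -> 2, the bottleneck factor of the language adapters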
"""ConvNeXT lazy import structure."""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
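
# --- Illustrative note (added; not part of the original file) ---
# With the _LazyModule above, `from transformers.models.convnext import ConvNextModel`
# defers the heavy torch/TF imports until the attribute is first accessed; the
# _import_structure dict is the lookup table the lazy module consults.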
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token,
                revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir,
            force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
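
# --- Illustrative usage sketch (added; not part of the original file) ---
# Loading one ONNX component and running it; the repo id and input shape are
# placeholders, not checkpoints this module guarantees to exist.
#   vae_decoder = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", provider="CPUExecutionProvider")
#   result = vae_decoder(latent_sample=np.zeros((1, 4, 64, 64), dtype=np.float32))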
"""Generate a large (pseudo)random prime using the Rabin-Miller primality test."""
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Rabin-Miller primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int) -> bool:
    """Quick screen against small primes before running Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime of roughly `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
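
# --- Illustrative usage sketch (added; not part of the original file) ---
# Two independent large primes, e.g. as the starting point of an RSA-style
# modulus (the key size is an arbitrary example):
#   p = generate_large_prime(512)
#   q = generate_large_prime(512)
#   n = p * q  # ~1024-bit modulus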
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000, encoder_layers=12, encoder_ffn_dim=2_048, encoder_attention_heads=4,
        decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=4,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
        activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6_000,
        max_target_positions=1_024, num_conv_layers=2, conv_kernel_sizes=(5, 5),
        conv_channels=1_024, input_feat_per_channel=80, input_channels=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
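
# --- Illustrative usage sketch (added; not part of the original file) ---
#   config = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))   # OK
#   Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))            # raises ValueError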
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .vq_model import VQModel
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
"""simple docstring"""
from manim import *
class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)
        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)
        step_1 = MarkupText(
            "Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            "As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))
        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(MoveToTarget(model_cpu_arr[i]), MoveToTarget(model_cpu_arr[i + 1]))
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
                self.play(MoveToTarget(model_cpu_arr[i]))

            a = a_c
            a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(FadeOut(step_2), FadeOut(a_c, run_time=0.5))

        step_3 = MarkupText("Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
"""Gauss's algorithm for computing the date of Easter."""
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for `year` using Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
"""simple docstring"""
def _snake_case ( _snake_case : int , _snake_case : int ) -> int:
'''simple docstring'''
return int((input_a, input_a).count(1 ) != 0 )
def _snake_case ( ) -> None:
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
"""simple docstring"""
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    names = _single_underscore_re.split(name)
    names = [_multiple_underscores_re.split(n) for n in names]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(names) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
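
# --- Illustrative usage sketch (added; not part of the original file) ---
#   camelcase_to_snakecase("SomeDataset")  -> "some_dataset"
#   filenames_for_dataset_split("/data", "SomeDataset", "train",
#                               filetype_suffix="arrow", shard_lengths=[100, 100, 50])
#   -> ["/data/some_dataset-train-00000-of-00003.arrow", ..., "...-00002-of-00003.arrow"]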
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Training arguments for seq2seq fine-tuning, extending `TrainingArguments`."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    """Disable gradient updates for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available torch device (CUDA, else CPU; MPS is warned against)."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display an image with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
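
# --- Illustrative usage sketch (added; not part of the original file) ---
#   device = get_device()
#   model = torch.nn.Linear(4, 4).to(device)
#   freeze_module(model)        # no parameter will receive gradients from now on
#   print(get_timestamp())      # e.g. "13:37:00"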
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list using the cocktail-shaker (bidirectional bubble) sort.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # backward pass: sink the smallest remaining element to the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # forward pass: float the largest remaining element to the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    r"""
    Stochastic sampling from Karras et al. [1] tailored to variance-expanding (VE) models [2].

    [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
    [2] Song, Yang, et al. "Score-based generative modeling through stochastic differential equations."
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
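
# --- Illustrative usage sketch (added; not part of the original pipeline file) ---
# The checkpoint id is a placeholder; any repo holding a UNet2DModel plus a
# KarrasVeScheduler in diffusers format would work.
#   pipe = KarrasVePipeline.from_pretrained("some-org/some-karras-ve-checkpoint")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("sample.png")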
"""Utility script that keeps the model table in the documentation index up to date."""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between lines beginning with `start_prompt` and `end_prompt`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camelcased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    """Center `text` in a cell of size `width`, counting ✅/❌ as width 2."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"

    return table
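# The generated Markdown starts roughly like this (illustrative excerpt; the widths depend on the model list):
# |      Model      | Tokenizer slow | Tokenizer fast | PyTorch support | TensorFlow support | Flax Support |
# |:---------------:|:--------------:|:--------------:|:---------------:|:------------------:|:------------:|
# |     ALBERT      |       ✅       |       ✅       |       ✅        |         ✅         |      ✅      |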
def check_model_table(overwrite=False):
    """Check that the model table in `index.md` is consistent with the state of the lib, and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
"""simple docstring"""
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = None
_A = None
_A = graph
self._normalize_graph(_UpperCAmelCase , _UpperCAmelCase )
_A = len(_UpperCAmelCase )
_A = None
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ):
if sources is int:
_A = [sources]
if sinks is int:
_A = [sinks]
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_UpperCAmelCase ) > 1 or len(_UpperCAmelCase ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] ):
_A = algorithm(self )
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # Subclasses should override this with the actual algorithm.
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's a neighbour and the current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
a = [0]
a = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
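    # Sanity check added for illustration: the only augmenting path in this sample graph is
    # 0 -> 1 -> 2 -> 3 with capacities (7, 6, 8), so the script should print "maximum flow is 6".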
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format). Note `from_gh` is a module-level
    flag set in the `__main__` block below."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
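# Illustration (not part of the original script): with targets=["DeprecationWarning"], a buffered
# warning such as "src/foo.py:12: DeprecationWarning: bar" (a hypothetical path) is kept, because
# the substring ": DeprecationWarning: " occurs in its text.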
if __name__ == "__main__":
def _lowerCAmelCase ( UpperCamelCase__: int ) -> Dict:
"""simple docstring"""
return values.split(""",""" )
_lowercase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
_lowercase : int = parser.parse_args()
_lowercase : Optional[Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
_lowercase : List[str] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
_lowercase : Optional[int] = extract_warnings(args.output_dir, args.targets)
_lowercase : Optional[int] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")


@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
            'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self, vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, initializer_factor=0.02,
        layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, project_dim=768, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
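# Illustrative usage (not part of the original file): the defaults above describe the
# XLM-R-large-sized text tower used by AltCLIP, e.g. `text_config = AltCLIPTextConfig(project_dim=768)`.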
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12,
        num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act="quick_gelu",
        layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
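# Illustrative usage (not part of the original file): calling
# `AltCLIPVisionConfig.from_pretrained("BAAI/AltCLIP")` loads the composite AltCLIP config and,
# thanks to the branch above, returns only its nested `vision_config`.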
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exists, we use it for backward compatibility.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Instantiate an `AltCLIPConfig` from an AltCLIP text and an AltCLIP vision model configuration."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6,
        num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for the ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the version table in the custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
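# For reference, the block this script edits inside custom.js looks roughly like this (illustrative):
#     const stableVersion = "v4.27.0"
#     const versionMapping = {
#         "main": "main",
#         "v4.27.0": "v4.27.0",
#     }
# Running the script with --version 4.28.0 bumps the stable version and appends a "v4.28.0" entry.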
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
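# Illustration of what these patterns match (the class names here are made up):
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
# is caught by `_re_copy_warning`, and its "BasicTransformerBlock->MyBlock" tail is then parsed by
# `_re_replace_pattern` into the (obj1, obj2, option) groups used in `is_copy_consistent` below.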
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Apply the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see an End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__lowerCAmelCase = parser.parse_args()
check_copies(args.fix_and_overwrite) | 585 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True,
        decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
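# Illustrative usage (not part of the original file):
# config = ViTMAEConfig(mask_ratio=0.9)  # pre-train with 90% of the image patches masked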
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)

    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    test_failures[i][0] = ""

            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }

            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
| 209 |
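A minimal sketch of the JSON-lines input the reporting script above consumes; the exact pytest report-log field set is assumed, and the file name is a placeholder.

import json

# Illustrative only: one JSON record per line, carrying the three keys the parser reads.
record = {"nodeid": "tests/test_big_modeling.py::test_dispatch_model", "duration": 0.0421, "outcome": "failed"}
with open("nightly_cpu.log", "w") as f:  # placeholder file name
    f.write(json.dumps(record) + "\n")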
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
a = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def _snake_case ( _snake_case : Optional[Any] ) -> str:
'''simple docstring'''
_A = torch.load(_snake_case , map_location='cpu' )
return sd
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Tuple=rename_keys_prefix ) -> List[str]:
'''simple docstring'''
_A = OrderedDict()
_A = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_A = key
for name_pair in rename_keys_prefix:
_A = new_key.replace(name_pair[0] , name_pair[1] )
_A = d[key]
if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, so copy it from `cls.predictions.bias`
_A = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def _snake_case ( _snake_case : List[str] , _snake_case : Dict ) -> Dict:
'''simple docstring'''
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_A = 'pretraining'
if "vcr" in checkpoint_path:
_A = {'visual_embedding_dim': 5_12}
elif "vqa_advanced" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48}
elif "vqa" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48}
elif "nlvr" in checkpoint_path:
_A = {'visual_embedding_dim': 10_24}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
_A = {'visual_embedding_dim': 5_12}
_A = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48}
_A = 'vqa_advanced'
elif "vqa" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48, 'num_labels': 31_29}
_A = 'vqa'
elif "nlvr" in checkpoint_path:
_A = {
'visual_embedding_dim': 10_24,
'num_labels': 2,
}
_A = 'nlvr'
_A = VisualBertConfig(**_snake_case )
# Load State Dict
_A = load_state_dict(_snake_case )
_A = get_new_dict(_snake_case , _snake_case )
if model_type == "pretraining":
_A = VisualBertForPreTraining(_snake_case )
elif model_type == "vqa":
_A = VisualBertForQuestionAnswering(_snake_case )
elif model_type == "nlvr":
_A = VisualBertForVisualReasoning(_snake_case )
elif model_type == "multichoice":
_A = VisualBertForMultipleChoice(_snake_case )
model.load_state_dict(_snake_case )
# Save Checkpoints
Path(_snake_case ).mkdir(exist_ok=_snake_case )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
a = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 7 | 0 |
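A hypothetical invocation of the converter above, assumed to run in the same module; both paths are placeholders, and the checkpoint file name must match an entry in ACCEPTABLE_CHECKPOINTS.

# Placeholder paths; `convert_visual_bert_checkpoint` is the name used in the script's __main__ block.
convert_visual_bert_checkpoint("checkpoints/nlvr2_pre_trained.th", "dumps/visual-bert-nlvr2")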
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase_ : Any = logging.get_logger(__name__)
class __lowercase ( __lowerCAmelCase ):
_A = ['''pixel_values''']
def __init__(self : int , snake_case : bool = True , snake_case : Dict[str, int] = None , snake_case : PILImageResampling = PILImageResampling.BICUBIC , snake_case : bool = True , snake_case : Dict[str, int] = None , snake_case : bool = True , snake_case : Union[int, float] = 1 / 255 , snake_case : bool = True , snake_case : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , snake_case : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **snake_case : Any , ) -> Optional[int]:
super().__init__(**_UpperCAmelCase )
_lowercase : Tuple = size if size is not None else {"shortest_edge": 224}
_lowercase : Optional[int] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_lowercase : List[Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
_lowercase : List[Any] = get_size_dict(_UpperCAmelCase , param_name="crop_size" )
_lowercase : List[Any] = do_resize
_lowercase : List[str] = size
_lowercase : Optional[Any] = resample
_lowercase : Optional[int] = do_center_crop
_lowercase : str = crop_size
_lowercase : Union[str, Any] = do_rescale
_lowercase : Optional[int] = rescale_factor
_lowercase : Optional[Any] = do_normalize
_lowercase : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowercase : Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _a(self : Dict , snake_case : np.ndarray , snake_case : Dict[str, int] , snake_case : PILImageResampling = PILImageResampling.BICUBIC , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : Dict , ) -> Optional[Any]:
_lowercase : List[Any] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
_lowercase : str = int((256 / 224) * size["shortest_edge"] )
_lowercase : Any = get_resize_output_image_size(_UpperCAmelCase , size=_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_lowercase : Optional[int] = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"""Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}""" )
return resize(
_UpperCAmelCase , size=(size_dict["height"], size_dict["width"]) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _a(self : Dict , snake_case : np.ndarray , snake_case : Dict[str, int] , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : Optional[Any] , ) -> Optional[Any]:
_lowercase : Optional[Any] = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dict must have keys \'height\' and \'width\'. Got {size.keys()}""" )
return center_crop(_UpperCAmelCase , size=(size["height"], size["width"]) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _a(self : Dict , snake_case : np.ndarray , snake_case : Union[int, float] , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : Optional[int] , ) -> Tuple:
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _a(self : str , snake_case : np.ndarray , snake_case : Union[float, List[float]] , snake_case : Union[float, List[float]] , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : int , ) -> List[Any]:
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _a(self : Tuple , snake_case : ImageInput , snake_case : Optional[bool] = None , snake_case : Optional[Dict[str, int]] = None , snake_case : PILImageResampling = None , snake_case : Optional[bool] = None , snake_case : Optional[Dict[str, int]] = None , snake_case : Optional[bool] = None , snake_case : Optional[float] = None , snake_case : Optional[bool] = None , snake_case : Optional[Union[float, Iterable[float]]] = None , snake_case : Optional[Union[float, Iterable[float]]] = None , snake_case : Optional[TensorType] = None , snake_case : ChannelDimension = ChannelDimension.FIRST , **snake_case : Dict , ) -> List[Any]:
_lowercase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
_lowercase : str = resample if resample is not None else self.resample
_lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : str = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_lowercase : Dict = image_std if image_std is not None else self.image_std
_lowercase : List[str] = size if size is not None else self.size
_lowercase : Tuple = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size
_lowercase : Union[str, Any] = get_size_dict(_UpperCAmelCase , param_name="crop_size" )
_lowercase : Optional[Any] = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_lowercase : List[str] = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
_lowercase : Dict = [self.resize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for image in images]
if do_center_crop:
_lowercase : int = [self.center_crop(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
if do_rescale:
_lowercase : List[str] = [self.rescale(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
if do_normalize:
_lowercase : Optional[int] = [self.normalize(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for image in images]
_lowercase : str = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
_lowercase : List[Any] = {"pixel_values": images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
| 461 |
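A standalone sketch of the shortest-edge branch of the `resize` method above: the requested short side is first scaled by 256/224 (headroom for a later center crop) and the long side keeps the aspect ratio. The function and variable names here are ours.

def shortest_edge_output_size(height: int, width: int, shortest_edge: int = 224) -> tuple:
    # Scale so the short side lands on (256/224) * shortest_edge, preserving aspect ratio.
    target = int((256 / 224) * shortest_edge)
    scale = target / min(height, width)
    return round(height * scale), round(width * scale)

print(shortest_edge_output_size(480, 640))  # (256, 341)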
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _snake_case ( _snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
for param in module.parameters():
_A = False
def _snake_case ( ) -> Tuple:
'''simple docstring'''
_A = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_A = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def _snake_case ( _snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = plt.imshow(_snake_case )
fig.axes.get_xaxis().set_visible(_snake_case )
fig.axes.get_yaxis().set_visible(_snake_case )
plt.show()
def _snake_case ( ) -> Optional[Any]:
'''simple docstring'''
_A = datetime.now()
_A = current_time.strftime('%H:%M:%S' )
return timestamp
| 7 | 0 |
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
"""simple docstring"""
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , ):
"""simple docstring"""
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return compound_interest(
_snake_case , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 565 |
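A worked example of the formulas above (the numbers are ours): $10,000 at a 5% nominal annual rate over three years, comparing simple interest with daily compounding.

principal, annual_rate, years = 10_000.0, 0.05, 3

simple = principal * (annual_rate / 365) * (years * 365)                      # 1500.00
compound_daily = principal * ((1 + annual_rate / 365) ** (years * 365) - 1)   # ~1618.22
print(f"simple: {simple:.2f}, daily compounding: {compound_daily:.2f}")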
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = ['''image_processor''', '''tokenizer''']
UpperCAmelCase : Optional[int] = '''ViTImageProcessor'''
UpperCAmelCase : int = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Tuple , _UpperCAmelCase : int=None , _UpperCAmelCase : Tuple=None , **_UpperCAmelCase : Dict ):
_A = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
_A = kwargs.pop('feature_extractor' )
_A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : Optional[Any] , _UpperCAmelCase : int=None , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[int]=None , **_UpperCAmelCase : Union[str, Any] ):
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
_A = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None:
_A = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if images is not None:
_A = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None and images is not None:
_A = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_A = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_A = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def lowerCAmelCase_ ( self : str , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : Union[str, Any] ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def lowerCAmelCase_ ( self : Dict ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def lowerCAmelCase_ ( self : Tuple ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
| 7 | 0 |
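The processor above appears to mirror CLIPSegProcessor (text / visual-prompt / image routing); assuming that equivalence, a plausible usage sketch follows. The checkpoint name is one public example, not something the snippet specifies.

import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor  # assumed equivalent of the class above

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']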
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
a__ : List[Any] = logging.getLogger(__name__)
@dataclass
class __magic_name__ :
UpperCamelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCamelCase : Optional[str] = field(
default=__lowerCAmelCase ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCamelCase : Optional[str] = field(
default=__lowerCAmelCase ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCamelCase : Optional[str] = field(
default=__lowerCAmelCase ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
    UpperCamelCase : bool = field(default=__lowerCAmelCase ,metadata={"help": "Whether to freeze the encoder."} )
UpperCamelCase : bool = field(default=__lowerCAmelCase ,metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class __magic_name__ :
UpperCamelCase : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
UpperCamelCase : Optional[str] = field(
default="summarization" ,metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} ,)
UpperCamelCase : Optional[int] = field(
default=1_024 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
UpperCamelCase : Optional[int] = field(
default=128 ,metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
UpperCamelCase : Optional[int] = field(
default=142 ,metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} ,)
UpperCamelCase : Optional[int] = field(
default=142 ,metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
UpperCamelCase : Optional[int] = field(default=-1 ,metadata={"help": "# training examples. -1 means use all."} )
UpperCamelCase : Optional[int] = field(default=-1 ,metadata={"help": "# validation examples. -1 means use all."} )
UpperCamelCase : Optional[int] = field(default=-1 ,metadata={"help": "# test examples. -1 means use all."} )
UpperCamelCase : Optional[str] = field(default=__lowerCAmelCase ,metadata={"help": "Source language id for translation."} )
UpperCamelCase : Optional[str] = field(default=__lowerCAmelCase ,metadata={"help": "Target language id for translation."} )
UpperCamelCase : Optional[int] = field(default=__lowerCAmelCase ,metadata={"help": "# num_beams to use for evaluation."} )
UpperCamelCase : bool = field(
default=__lowerCAmelCase ,metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} ,)
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
"""simple docstring"""
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(_snake_case, os.path.join(_snake_case, F'''{split}_results.json''' ) )
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = parser.parse_args_into_dataclasses()
check_output_dir(_snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ), training_args.fpaa, )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s', _snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
_lowerCAmelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(_snake_case, _snake_case, _snake_case ):
assert hasattr(_snake_case, _snake_case ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(_snake_case, _snake_case, getattr(_snake_case, _snake_case ) )
_lowerCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
_lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path, from_tf='.ckpt' in model_args.model_name_or_path, config=_snake_case, cache_dir=model_args.cache_dir, )
# use task specific params
use_task_specific_params(_snake_case, data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowerCAmelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_snake_case, (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_snake_case, _snake_case ):
_lowerCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_snake_case )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowerCAmelCase = SeqaSeqDataset
# Get datasets
_lowerCAmelCase = (
dataset_class(
_snake_case, type_path='train', data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '', )
if training_args.do_train
else None
)
_lowerCAmelCase = (
dataset_class(
_snake_case, type_path='val', data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '', )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowerCAmelCase = (
dataset_class(
_snake_case, type_path='test', data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '', )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowerCAmelCase = (
build_compute_metrics_fn(data_args.task, _snake_case ) if training_args.predict_with_generate else None
)
_lowerCAmelCase = SeqaSeqTrainer(
model=_snake_case, args=_snake_case, data_args=_snake_case, train_dataset=_snake_case, eval_dataset=_snake_case, data_collator=SeqaSeqDataCollator(
_snake_case, _snake_case, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=_snake_case, tokenizer=_snake_case, )
_lowerCAmelCase = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
_lowerCAmelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowerCAmelCase = train_result.metrics
_lowerCAmelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train', _snake_case, training_args.output_dir )
all_metrics.update(_snake_case )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowerCAmelCase = trainer.evaluate(metric_key_prefix='val' )
_lowerCAmelCase = data_args.n_val
_lowerCAmelCase = round(metrics['val_loss'], 4 )
if trainer.is_world_process_zero():
handle_metrics('val', _snake_case, training_args.output_dir )
all_metrics.update(_snake_case )
if training_args.do_predict:
logger.info('*** Predict ***' )
_lowerCAmelCase = trainer.predict(test_dataset=_snake_case, metric_key_prefix='test' )
_lowerCAmelCase = test_output.metrics
_lowerCAmelCase = data_args.n_test
if trainer.is_world_process_zero():
_lowerCAmelCase = round(metrics['test_loss'], 4 )
handle_metrics('test', _snake_case, training_args.output_dir )
all_metrics.update(_snake_case )
if training_args.predict_with_generate:
_lowerCAmelCase = tokenizer.batch_decode(
test_output.predictions, skip_special_tokens=_snake_case, clean_up_tokenization_spaces=_snake_case )
_lowerCAmelCase = lmap(str.strip, _snake_case )
write_txt_file(_snake_case, os.path.join(training_args.output_dir, 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(_snake_case, os.path.join(training_args.output_dir, 'all_results.json' ) )
return all_metrics
def A__ ( __lowerCamelCase ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 589 |
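An illustrative args file for the `parse_json_file` branch above; every value is an assumption chosen only to show the flat-JSON shape HfArgumentParser expects across the three dataclasses.

import json

args = {
    "model_name_or_path": "sshleifer/tiny-mbart",  # placeholder model
    "data_dir": "./wmt_en_ro",                     # placeholder data path
    "output_dir": "./output",
    "task": "translation",
    "src_lang": "en_XX",
    "tgt_lang": "ro_RO",
    "do_train": True,
    "predict_with_generate": True,
}
with open("finetune_args.json", "w") as f:
    json.dump(args, f, indent=2)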
"""simple docstring"""
import math
from datetime import datetime, timedelta
def _snake_case ( _snake_case : int ) -> datetime:
'''simple docstring'''
_A = year % 19
_A = year % 4
_A = year % 7
_A = math.floor(year / 1_00 )
_A = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    _A = math.floor(leap_day_inhibits / 4 )  # Gauss's q term is the integer quotient
_A = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
_A = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
_A = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
_A = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(_snake_case , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(_snake_case , 4 , 18 )
else:
return datetime(_snake_case , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
a = '''will be''' if year > datetime.now().year else '''was'''
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 7 | 0 |
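Two quick checks appended to the script above, using the `gauss_easter` name from its `__main__` block; the reference dates are the known Gregorian Easters.

# Known dates: Easter 2021 = April 4, Easter 2023 = April 9.
assert gauss_easter(2_021) == datetime(2_021, 4, 4)
assert gauss_easter(2_023) == datetime(2_023, 4, 9)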
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def __a ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ : str = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
lowerCamelCase_ : int = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert("RGB" )
lowerCamelCase_ : Union[str, Any] = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ),
] )
lowerCamelCase_ : Optional[Any] = transform(_snake_case ).unsqueeze(0 ).to(_snake_case )
return image
def __a ( __UpperCAmelCase : List[str] ) -> Dict:
"""simple docstring"""
if "visual_encoder" in key:
lowerCamelCase_ : List[str] = re.sub("visual_encoder*" , "vision_model.encoder" , _snake_case )
if "blocks" in key:
lowerCamelCase_ : Union[str, Any] = re.sub(R"blocks" , "layers" , _snake_case )
if "attn" in key:
lowerCamelCase_ : Union[str, Any] = re.sub(R"attn" , "self_attn" , _snake_case )
if "norm1" in key:
lowerCamelCase_ : Optional[int] = re.sub(R"norm1" , "layer_norm1" , _snake_case )
if "norm2" in key:
lowerCamelCase_ : Tuple = re.sub(R"norm2" , "layer_norm2" , _snake_case )
if "encoder.norm" in key:
lowerCamelCase_ : Dict = re.sub(R"encoder.norm" , "post_layernorm" , _snake_case )
if "encoder.patch_embed.proj" in key:
lowerCamelCase_ : Optional[int] = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , _snake_case )
if "encoder.pos_embed" in key:
lowerCamelCase_ : List[Any] = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , _snake_case )
if "encoder.cls_token" in key:
lowerCamelCase_ : int = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , _snake_case )
if "self_attn" in key:
lowerCamelCase_ : Dict = re.sub(R"self_attn.proj" , "self_attn.projection" , _snake_case )
return key
@torch.no_grad()
def __a ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str]=None ) -> Any:
"""simple docstring"""
if config_path is not None:
lowerCamelCase_ : Optional[int] = BlipConfig.from_pretrained(_snake_case )
else:
lowerCamelCase_ : Any = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowerCamelCase_ : Union[str, Any] = BlipForConditionalGeneration(_snake_case ).eval()
lowerCamelCase_ : str = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
lowerCamelCase_ : int = blip_decoder(pretrained=_snake_case , image_size=384 , vit="base" )
lowerCamelCase_ : int = pt_model.eval()
lowerCamelCase_ : Any = pt_model.state_dict()
for key in modified_state_dict.copy():
lowerCamelCase_ : Tuple = modified_state_dict.pop(_snake_case )
lowerCamelCase_ : Optional[int] = rename_key(_snake_case )
lowerCamelCase_ : Union[str, Any] = value
hf_model.load_state_dict(_snake_case )
lowerCamelCase_ : Dict = 384
lowerCamelCase_ : Union[str, Any] = load_demo_image(image_size=_snake_case , device="cpu" )
lowerCamelCase_ : Dict = BertTokenizer.from_pretrained("bert-base-uncased" )
lowerCamelCase_ : List[Any] = tokenizer(["a picture of"] ).input_ids
lowerCamelCase_ : Dict = hf_model.generate(_snake_case , _snake_case )
assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowerCamelCase_ : Any = hf_model.generate(_snake_case )
assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(_snake_case )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowerCamelCase_ : Dict = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
lowerCamelCase_ : Tuple = blip_vqa(pretrained=_snake_case , image_size=_snake_case , vit="base" )
vqa_model.eval()
lowerCamelCase_ : List[Any] = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowerCamelCase_ : Optional[int] = modified_state_dict.pop(_snake_case )
lowerCamelCase_ : Any = rename_key(_snake_case )
lowerCamelCase_ : Tuple = value
lowerCamelCase_ : Tuple = BlipForQuestionAnswering(_snake_case )
hf_vqa_model.load_state_dict(_snake_case )
lowerCamelCase_ : List[str] = ["How many dogs are in this image?"]
lowerCamelCase_ : Optional[Any] = tokenizer(_snake_case , return_tensors="pt" ).input_ids
lowerCamelCase_ : int = hf_vqa_model.generate(_snake_case , _snake_case )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
lowerCamelCase_ : Any = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
lowerCamelCase_ : Tuple = blip_itm(pretrained=_snake_case , image_size=_snake_case , vit="base" )
itm_model.eval()
lowerCamelCase_ : Union[str, Any] = itm_model.state_dict()
for key in modified_state_dict.copy():
lowerCamelCase_ : Optional[Any] = modified_state_dict.pop(_snake_case )
lowerCamelCase_ : Optional[Any] = rename_key(_snake_case )
lowerCamelCase_ : Tuple = value
lowerCamelCase_ : Optional[Any] = BlipForImageTextRetrieval(_snake_case )
lowerCamelCase_ : Union[str, Any] = ["A picture of a woman with a dog sitting in a beach"]
lowerCamelCase_ : Dict = tokenizer(
_snake_case , return_tensors="pt" , padding="max_length" , truncation=_snake_case , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(_snake_case )
hf_itm_model.eval()
lowerCamelCase_ : Any = hf_itm_model(_snake_case , _snake_case , use_itm_head=_snake_case )
lowerCamelCase_ : int = hf_itm_model(_snake_case , _snake_case , use_itm_head=_snake_case )
assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
snake_case_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
snake_case_ : Dict = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 488 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : int = '''gpt_bigcode'''
UpperCAmelCase : str = ['''past_key_values''']
UpperCAmelCase : Dict = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Tuple , _UpperCAmelCase : Dict=50_257 , _UpperCAmelCase : List[Any]=1_024 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : str="gelu_pytorch_tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[Any]=1E-5 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[Any]=50_256 , _UpperCAmelCase : Dict=50_256 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Any=True , **_UpperCAmelCase : Any , ):
_A = vocab_size
_A = n_positions
_A = n_embd
_A = n_layer
_A = n_head
_A = n_inner
_A = activation_function
_A = resid_pdrop
_A = embd_pdrop
_A = attn_pdrop
_A = layer_norm_epsilon
_A = initializer_range
_A = scale_attn_weights
_A = use_cache
_A = attention_softmax_in_fpaa
_A = scale_attention_softmax_in_fpaa
_A = multi_query
_A = bos_token_id
_A = eos_token_id
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
| 7 | 0 |
def lowerCamelCase_ ( UpperCAmelCase__ = 100 ):
"""simple docstring"""
a_ = n * (n + 1) * (2 * n + 1) / 6
a_ = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 483 |
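A hand check of the closed forms above for n = 10, the classic Project Euler 6 instance.

n = 10
sum_of_squares = n * (n + 1) * (2 * n + 1) // 6   # 385
square_of_sum = (n * (n + 1) // 2) ** 2           # 3025
print(square_of_sum - sum_of_squares)             # 2640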
"""simple docstring"""
def _snake_case ( _snake_case : str ) -> str:
'''simple docstring'''
return " ".join(
''.join(word[::-1] ) if len(_snake_case ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 7 | 0 |
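Tracing the example above: only words longer than four characters are reversed, so the garbled input decodes cleanly.

sentence = "Hey wollef sroirraw"
print(" ".join(w[::-1] if len(w) > 4 else w for w in sentence.split()))  # Hey fellow warriors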
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class UpperCAmelCase_ ( __lowerCAmelCase ):
"""simple docstring"""
UpperCamelCase_ = '''vit_mae'''
def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[int]=768 , UpperCAmelCase : Tuple=12 , UpperCAmelCase : Optional[Any]=12 , UpperCAmelCase : Optional[int]=3072 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : Optional[Any]=0.0 , UpperCAmelCase : Optional[int]=0.0 , UpperCAmelCase : Dict=0.0_2 , UpperCAmelCase : List[Any]=1e-12 , UpperCAmelCase : Optional[Any]=224 , UpperCAmelCase : int=16 , UpperCAmelCase : str=3 , UpperCAmelCase : Tuple=True , UpperCAmelCase : int=16 , UpperCAmelCase : str=512 , UpperCAmelCase : int=8 , UpperCAmelCase : List[Any]=2048 , UpperCAmelCase : Optional[Any]=0.7_5 , UpperCAmelCase : List[str]=False , **UpperCAmelCase : Union[str, Any] , ) -> Dict:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase : List[str] =hidden_size
lowercase : Union[str, Any] =num_hidden_layers
lowercase : Any =num_attention_heads
lowercase : Tuple =intermediate_size
lowercase : str =hidden_act
lowercase : Optional[int] =hidden_dropout_prob
lowercase : Optional[int] =attention_probs_dropout_prob
lowercase : Optional[Any] =initializer_range
lowercase : str =layer_norm_eps
lowercase : List[Any] =image_size
lowercase : str =patch_size
lowercase : int =num_channels
lowercase : List[Any] =qkv_bias
lowercase : int =decoder_num_attention_heads
lowercase : str =decoder_hidden_size
lowercase : Tuple =decoder_num_hidden_layers
lowercase : str =decoder_intermediate_size
lowercase : Dict =mask_ratio
lowercase : Any =norm_pix_loss
| 94 |
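Arithmetic implied by the defaults above: a 224x224 image cut into 16x16 patches yields 196 patches, of which a 0.75 mask ratio hides 147.

image_size, patch_size, mask_ratio = 224, 16, 0.75
num_patches = (image_size // patch_size) ** 2   # 196
num_masked = int(mask_ratio * num_patches)      # 147
print(num_patches, num_masked)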
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = (KDPMaDiscreteScheduler,)
UpperCAmelCase : Any = 10
def lowerCAmelCase_ ( self : Dict , **_UpperCAmelCase : Optional[Any] ):
_A = {
'num_train_timesteps': 1_100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**_UpperCAmelCase )
return config
def lowerCAmelCase_ ( self : Any ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(prediction_type='v_prediction' )
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
_A = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = output.prev_sample
_A = torch.sum(torch.abs(_UpperCAmelCase ) )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4E-0_7 ) < 1E-2
assert abs(result_mean.item() - 6.1_1_1_2E-1_0 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2E-0_7 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def lowerCAmelCase_ ( self : Optional[Any] ):
if torch_device == "mps":
return
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
_A = sample.to(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = output.prev_sample
_A = torch.sum(torch.abs(_UpperCAmelCase ) )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
def lowerCAmelCase_ ( self : Any ):
if torch_device == "mps":
return
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCAmelCase )
_A = self.dummy_model()
_A = self.dummy_sample_deter.to(_UpperCAmelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_A = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
_A = output.prev_sample
_A = torch.sum(torch.abs(_UpperCAmelCase ) )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
if str(_UpperCAmelCase ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
| 7 | 0 |
def _lowerCAmelCase ( UpperCamelCase__: float , UpperCamelCase__: float ) -> float:
"""simple docstring"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 641 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def _snake_case ( _snake_case : Optional[int] , _snake_case : Optional[Any]=10 ) -> Optional[int]:
'''simple docstring'''
_A = []
for _ in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def _snake_case ( _snake_case : Optional[Any] , _snake_case : Union[str, Any]=10 ) -> List[str]:
'''simple docstring'''
_A = []
for step in range(_snake_case ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_A = os.path.join(_snake_case , 'schedule.bin' )
torch.save(scheduler.state_dict() , _snake_case )
_A = torch.load(_snake_case )
scheduler.load_state_dict(_snake_case )
return lrs
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ):
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for a, b in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertAlmostEqual(_UpperCAmelCase , _UpperCAmelCase , delta=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_UpperCAmelCase )
_A = torch.tensor([0.4, 0.2, -0.5] )
_A = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_A = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
for _ in range(100 ):
_A = criterion(_UpperCAmelCase , _UpperCAmelCase )
loss.backward()
optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
def lowerCAmelCase_ ( self : int ):
_A = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_UpperCAmelCase )
_A = torch.tensor([0.4, 0.2, -0.5] )
_A = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_A = Adafactor(
params=[w] , lr=1E-2 , eps=(1E-3_0, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_UpperCAmelCase , weight_decay=0.0 , relative_step=_UpperCAmelCase , scale_parameter=_UpperCAmelCase , warmup_init=_UpperCAmelCase , )
for _ in range(1_000 ):
_A = criterion(_UpperCAmelCase , _UpperCAmelCase )
loss.backward()
optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. We do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = nn.Linear(50 , 50 ) if is_torch_available() else None
UpperCAmelCase : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
UpperCAmelCase : Dict = 10
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]=None ):
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for a, b in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertAlmostEqual(_UpperCAmelCase , _UpperCAmelCase , delta=_UpperCAmelCase , msg=_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_A = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
_A = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_A , _A = data
_A = scheduler_func(self.optimizer , **_UpperCAmelCase )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_A = unwrap_schedule(_UpperCAmelCase , self.num_steps )
self.assertListAlmostEqual(
_UpperCAmelCase , _UpperCAmelCase , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , )
_A = scheduler_func(self.optimizer , **_UpperCAmelCase )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(_UpperCAmelCase ) # wrap to test picklability of the schedule
_A = unwrap_and_save_reload_schedule(_UpperCAmelCase , self.num_steps )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase , msg=F'''failed for {scheduler_func} in save and reload''' )
class lowercase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[int] ):
_A = fn
def __call__( self : Tuple , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[str] ):
return self.fn(*_UpperCAmelCase , **_UpperCAmelCase )
@classmethod
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Any ):
_A = list(map(self , scheduler.lr_lambdas ) )
| 7 | 0 |
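A minimal sketch of the warmup-schedule API exercised by the tests above, using torch's own AdamW to stay version-safe; the model and step counts are placeholders.

import torch
from transformers import get_linear_schedule_with_warmup

model = torch.nn.Linear(50, 50)  # placeholder model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=100, num_training_steps=1_000)

for _ in range(1_000):
    # forward/backward would go here
    optimizer.step()
    scheduler.step()   # advance the learning-rate schedule once per optimizer step
    optimizer.zero_grad()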
'''simple docstring'''
from manim import *
class _lowerCamelCase ( __lowerCAmelCase ):
'''simple docstring'''
def snake_case__ ( self ):
"""simple docstring"""
__A : Any = Rectangle(height=0.5 , width=0.5 )
__A : int = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__A : str = Rectangle(height=0.2_5 , width=0.2_5 )
__A : Union[str, Any] = [mem.copy() for i in range(6 )]
__A : Optional[int] = [mem.copy() for i in range(6 )]
__A : Tuple = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__A : List[Any] = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__A : int = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__A : Any = Text('CPU' , font_size=24 )
__A : Tuple = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
__A : Any = [mem.copy() for i in range(4 )]
__A : List[Any] = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__A : Optional[Any] = Text('GPU' , font_size=24 )
__A : List[Any] = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
__A : str = [mem.copy() for i in range(6 )]
__A : Tuple = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__A : List[str] = Text('Model' , font_size=24 )
__A : Optional[Any] = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
__A : List[str] = []
__A : str = []
for i, rect in enumerate(_UpperCAmelCase ):
__A : Dict = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
__A : List[str] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
__A : Any = [meta_mem.copy() for i in range(6 )]
__A : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
__A : Union[str, Any] = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__A : int = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__A : int = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
__A : Optional[int] = Text('Disk' , font_size=24 )
__A : str = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.2_5, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
__A : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__A : Optional[int] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
__A : Optional[int] = MarkupText(
F"""<span fgcolor=\'{BLUE}\'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
__A : Union[str, Any] = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
__A : Any = Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.0_2 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
__A : int = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__A : int = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
__A : List[str] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.0_2}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__A : int = a.copy()
for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
__A : Any = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__A : List[str] = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__A : List[Any] = a_c
__A : Optional[Any] = a_c.copy()
input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
__A : int = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait()
| 365 |
"""simple docstring"""
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power P = S * pf from apparent power S and power factor pf."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power Q = S * sqrt(1 - pf^2) from apparent power S and power factor pf."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)
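
# Added usage check (not part of the original file): with apparent power
# S = 100 VA and power factor 0.8 the power triangle gives real power
# P = S * pf = 80 W and reactive power Q = S * sqrt(1 - pf^2) = 60 var.
assert abs(real_power(100, 0.8) - 80.0) < 1e-9
assert abs(reactive_power(100, 0.8) - 60.0) < 1e-9
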
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    # ||a_i - b_j||^2 = ||a_i||^2 - 2 a_i . b_j + ||b_j||^2, computed in a vectorized way
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    # map every RGB pixel to the index of its nearest palette cluster
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
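
# Added sanity check (not part of the original file): verify the vectorized
# distance against the direct definition ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2
# and quantize a toy 2x2 image against a four-color palette.
_palette = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=np.float64)
_image = np.array(
    [[[250, 10, 5], [3, 2, 1]], [[10, 240, 20], [0, 5, 250]]], dtype=np.float64
)
_flat = _image.reshape(-1, 3)
_direct = ((_flat[:, None, :] - _palette[None, :, :]) ** 2).sum(-1)
assert np.allclose(squared_euclidean_distance(_flat, _palette), _direct)
assert color_quantize(_image, _palette).tolist() == [1, 0, 2, 3]  # red, black, green, blue
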
class ImageGPTImageProcessor(BaseImageProcessor):
    """Image processor for ImageGPT: optionally resizes, normalizes pixel values to
    [-1, 1] and color-quantizes pixels to the indices of a fixed palette (``clusters``)."""

    model_input_names = ["pixel_values"]
def __init__( self , _A = None , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = True , _A = True , **_A , ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
        UpperCamelCase : Optional[Any] = size if size is not None else {"height": 256, "width": 256}
UpperCamelCase : Optional[Any] = get_size_dict(_UpperCAmelCase )
UpperCamelCase : Tuple = np.array(_UpperCAmelCase ) if clusters is not None else None
UpperCamelCase : List[str] = do_resize
UpperCamelCase : Union[str, Any] = size
UpperCamelCase : str = resample
UpperCamelCase : Dict = do_normalize
UpperCamelCase : str = do_color_quantize
def _a ( self , _A , _A , _A = PILImageResampling.BILINEAR , _A = None , **_A , ):
'''simple docstring'''
UpperCamelCase : List[Any] = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
_UpperCAmelCase , size=(size["""height"""], size["""width"""]) , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _a ( self , _A , _A = None , ):
'''simple docstring'''
        UpperCamelCase : Dict = rescale(image=_UpperCAmelCase , scale=1 / 127.5 , data_format=_UpperCAmelCase )
UpperCamelCase : List[Any] = image - 1
return image
def _a ( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ):
'''simple docstring'''
UpperCamelCase : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCamelCase : Any = size if size is not None else self.size
UpperCamelCase : Optional[int] = get_size_dict(_UpperCAmelCase )
UpperCamelCase : List[str] = resample if resample is not None else self.resample
UpperCamelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase : str = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
UpperCamelCase : Optional[int] = clusters if clusters is not None else self.clusters
UpperCamelCase : List[str] = np.array(_UpperCAmelCase )
UpperCamelCase : Tuple = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
UpperCamelCase : List[str] = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
UpperCamelCase : Union[str, Any] = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_normalize:
UpperCamelCase : List[Any] = [self.normalize(image=_UpperCAmelCase ) for image in images]
if do_color_quantize:
UpperCamelCase : Optional[Any] = [to_channel_dimension_format(_UpperCAmelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
UpperCamelCase : Dict = np.array(_UpperCAmelCase )
UpperCamelCase : Any = color_quantize(_UpperCAmelCase , _UpperCAmelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
UpperCamelCase : Dict = images.shape[0]
UpperCamelCase : Union[str, Any] = images.reshape(_UpperCAmelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
UpperCamelCase : List[Any] = list(_UpperCAmelCase )
else:
UpperCamelCase : Tuple = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
UpperCamelCase : Union[str, Any] = {"""input_ids""": images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
| 102 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"
def __init__( self : str , _UpperCAmelCase : Optional[Any]=30_522 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : int=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Dict=3_072 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : Any=1E-1_2 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : int=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : List[str]="absolute" , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : int=False , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[Any]=False , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Tuple=("en_XX",) , _UpperCAmelCase : List[str]=None , **_UpperCAmelCase : Optional[Any] , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = hidden_act
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = initializer_range
_A = layer_norm_eps
_A = position_embedding_type
_A = use_cache
_A = classifier_dropout
_A = pre_norm
_A = adapter_reduction_factor
_A = adapter_layer_norm
_A = adapter_reuse_layer_norm
_A = ln_before_adapter
_A = list(_UpperCAmelCase )
_A = default_language
class XmodOnnxConfig(OnnxConfig):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 7 | 0 |
'''simple docstring'''
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 585 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    """Thin wrapper around an ONNX Runtime ``InferenceSession`` with save/load helpers."""

    def __init__(self, model=None, **kwargs):
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.')
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir', None)
        self.latest_model_name = kwargs.get('latest_model_name', ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider')
            provider = 'CPUExecutionProvider'

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f'Provided path ({save_directory}) should be a directory, not a file')
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs['model_save_dir'] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs['model_save_dir'] = Path(model_cache_path).parent
            kwargs['latest_model_name'] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split('@')) == 2:
            model_id, revision = model_id.split('@')

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
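
# Added usage sketch (not part of the original file). Loading and calling the
# wrapper would look roughly like the following; the directory path and the
# graph input name `sample` are illustrative placeholders, not taken from the
# original file.
#
#   model = OnnxRuntimeModel.from_pretrained(
#       "path/to/exported_model", file_name="model.onnx", provider="CPUExecutionProvider"
#   )
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))
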
| 7 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve Z^2 = R^2 + X^2 for whichever of R, X, Z is passed as 0."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('Exactly one argument must be 0')
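
# Added usage check (not part of the original file): a 3-4-5 triangle of
# Z^2 = R^2 + X^2, solved for each quantity in turn.
assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
assert electrical_impedance(0, 4, 5) == {"resistance": 3.0}
assert electrical_impedance(3, 0, 5) == {"reactance": 4.0}
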
if __name__ == "__main__":
import doctest
doctest.testmod()
| 209 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , _UpperCAmelCase : Union[str, Any]=10_000 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : int=2_048 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : List[str]=6 , _UpperCAmelCase : Tuple=2_048 , _UpperCAmelCase : str=4 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Union[str, Any]="relu" , _UpperCAmelCase : List[Any]=256 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Tuple=0 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : List[str]=6_000 , _UpperCAmelCase : Optional[Any]=1_024 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Any=(5, 5) , _UpperCAmelCase : int=1_024 , _UpperCAmelCase : str=80 , _UpperCAmelCase : Any=1 , **_UpperCAmelCase : Tuple , ):
_A = vocab_size
_A = d_model
_A = encoder_ffn_dim
_A = encoder_layers
_A = encoder_attention_heads
_A = decoder_ffn_dim
_A = decoder_layers
_A = decoder_attention_heads
_A = dropout
_A = attention_dropout
_A = activation_dropout
_A = activation_function
_A = init_std
_A = encoder_layerdrop
_A = decoder_layerdrop
_A = use_cache
_A = encoder_layers
_A = scale_embedding # scale factor will be sqrt(d_model) if True
_A = max_source_positions
_A = max_target_positions
_A = num_conv_layers
_A = list(_UpperCAmelCase )
_A = conv_channels
_A = input_feat_per_channel
_A = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
| 7 | 0 |
from types import MethodType  # added: used to bind the custom step functions below
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput

BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Expects bits from -1 to 1, outputs image tensor from 0 to 1."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
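
# Added round-trip check (not part of the original file): encoding to 8 bit
# planes in {-1, +1} and decoding back reproduces the 8-bit-quantized image.
_img = torch.rand(2, 3, 8, 8)
_planes = decimal_to_bits(_img)
_recon = bits_to_decimal(_planes)
assert _planes.shape == (2, 24, 8, 8)
assert torch.allclose(_recon, (_img * 255).int().clamp(0, 255) / 255)
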
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one

    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    # Descriptive class name; the original name is not recoverable from this dump.
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        self.register_modules(unet=unet, scheduler=scheduler)
        # Route the scheduler's `step` to the bit-aware variant above. MethodType
        # binds the scheduler instance as the functions' `self` argument.
        step_fn = ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        self.scheduler.step = MethodType(step_fn, self.scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 461 |
"""simple docstring"""
from manim import *
class BigModelInference(Scene):
    # Descriptive class name; Manim scenes subclass `Scene` and override `construct`.
    # The original class name is not recoverable from this dump.
    def construct(self):
_A = Rectangle(height=0.5 , width=0.5 )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_A = Rectangle(height=0.25 , width=0.25 )
_A = [mem.copy() for i in range(6 )]
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('CPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(4 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('GPU' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
_A = [mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Model' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
_A = []
_A = []
for i, rect in enumerate(_UpperCAmelCase ):
_A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 )
target.move_to(_UpperCAmelCase )
model_arr.append(_UpperCAmelCase )
_A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
_A = [meta_mem.copy() for i in range(6 )]
_A = [meta_mem.copy() for i in range(6 )]
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
_A = Text('Disk' , font_size=24 )
_A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_A = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
_A = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
_A = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase ) )
_A = Square(0.3 )
input.set_fill(_UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 )
self.play(Write(_UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(_UpperCAmelCase ) )
self.play(FadeOut(_UpperCAmelCase ) )
_A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_A = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
_A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_A = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
_A = AnimationGroup(
FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_A = 0.7
self.play(
Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_A = a_c
_A = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , )
_A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) )
self.wait()
| 7 | 0 |
'''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Returns truthy value for `key` from the env if available else the default."""
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
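
# Added usage sketch (not part of the original file); the environment variable
# names below are placeholders.
os.environ["MY_DEBUG"] = "yes"
assert parse_flag_from_env("MY_DEBUG") is True
os.environ["WORLD_SIZE"] = "4"
assert get_int_from_env(["MPI_WORLD_SIZE", "WORLD_SIZE"], default=1) == 4
assert parse_choice_from_env("MY_MODE", default="no") == "no"
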
| 565 |
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values: returns 1 if at least one input is 1."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 7 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"
def __init__( self , __magic_name__=8_0_6_5 , __magic_name__=1_5_3_6 , __magic_name__=3_6 , __magic_name__=6_1_4_4 , __magic_name__=4 , __magic_name__=3_8_4 , __magic_name__=9_2_0 , __magic_name__=1e-5 , __magic_name__=0.3 , __magic_name__="relu" , __magic_name__=0.02 , __magic_name__=0.3 , __magic_name__=0.3 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__=1 , __magic_name__=0.3 , __magic_name__=1 , __magic_name__=(7,) , __magic_name__=(3,) , __magic_name__=8_0 , __magic_name__=1 , __magic_name__=None , __magic_name__="sum" , __magic_name__=False , **__magic_name__ , ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = attention_head_dim
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = layerdrop
_lowerCAmelCase = hidden_act
_lowerCAmelCase = initializer_range
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = pad_token_id
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = eos_token_id
_lowerCAmelCase = conv_glu_dim
_lowerCAmelCase = conv_dropout
_lowerCAmelCase = num_conv_layers
_lowerCAmelCase = input_feat_per_channel
_lowerCAmelCase = input_channels
_lowerCAmelCase = conv_channels
_lowerCAmelCase = ctc_loss_reduction
_lowerCAmelCase = ctc_zero_infinity
# prevents config testing fail with exporting to json
_lowerCAmelCase = list(_UpperCAmelCase )
_lowerCAmelCase = list(_UpperCAmelCase )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
| 589 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Additional arguments for fine-tuning seq2seq models; field names recovered from the metadata help strings."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 7 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
def __init__( self : int , __magic_name__ : Dict=100 , __magic_name__ : Optional[int]=5 , __magic_name__ : Optional[Any]=1 , __magic_name__ : List[Any]=1 , __magic_name__ : List[str]=249 , __magic_name__ : List[Any]=6 , __magic_name__ : Tuple=17 , __magic_name__ : List[Any]=25 , __magic_name__ : Tuple=4 , __magic_name__ : Dict=4 , __magic_name__ : int=128 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : Any=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : Optional[Any]=0.0006 , __magic_name__ : Tuple=512 , __magic_name__ : int=0.02 , __magic_name__ : List[Any]=1e-12 , __magic_name__ : Optional[Any]=1 , __magic_name__ : Optional[int]=True , __magic_name__ : int=1 , __magic_name__ : Optional[Any]=5_0256 , __magic_name__ : List[str]=5_0256 , **__magic_name__ : Optional[Any] , ) -> int:
lowerCamelCase_ : List[Any] = vocab_size
lowerCamelCase_ : Optional[int] = action_weight
lowerCamelCase_ : Dict = reward_weight
lowerCamelCase_ : Optional[int] = value_weight
lowerCamelCase_ : int = max_position_embeddings
lowerCamelCase_ : Union[str, Any] = block_size
lowerCamelCase_ : Optional[Any] = action_dim
lowerCamelCase_ : Union[str, Any] = observation_dim
lowerCamelCase_ : List[Any] = transition_dim
lowerCamelCase_ : str = learning_rate
lowerCamelCase_ : Any = n_layer
lowerCamelCase_ : List[str] = n_head
lowerCamelCase_ : List[str] = n_embd
lowerCamelCase_ : Optional[int] = embd_pdrop
lowerCamelCase_ : Tuple = attn_pdrop
lowerCamelCase_ : Optional[Any] = resid_pdrop
lowerCamelCase_ : int = initializer_range
lowerCamelCase_ : Dict = layer_norm_eps
lowerCamelCase_ : str = kaiming_initializer_range
lowerCamelCase_ : Any = use_cache
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
| 488 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """
    Pipeline for unconditional image generation using the stochastic sampler of Karras et al. (2022).

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`KarrasVeScheduler`]): Scheduler for the diffusion process.
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
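

# Added illustration (not part of the original file). The numbered comments in
# the loop above appear to follow the stochastic sampler of Karras et al. (2022),
# "Elucidating the Design Space of Diffusion-Based Generative Models": an Euler
# step in sigma plus a second-order (Heun) correction. A self-contained NumPy
# sketch of just the deterministic part (the noise-increase steps 1-2 are
# omitted), using a toy denoiser that always predicts x_0 = 0 so the exact
# solution is known:
import numpy as np


def _toy_denoiser(x, sigma):
    # pretend all data mass sits at 0, so E[x_0 | x_sigma] = 0
    return np.zeros_like(x)


_sigmas = [10.0, 5.0, 1.0, 0.0]
_x = np.random.default_rng(0).normal(size=4) * _sigmas[0]
for _sigma, _sigma_prev in zip(_sigmas[:-1], _sigmas[1:]):
    _d = (_x - _toy_denoiser(_x, _sigma)) / _sigma  # dx/dsigma at sigma
    _x_prev = _x + (_sigma_prev - _sigma) * _d      # Euler step (step 5)
    if _sigma_prev != 0:
        # 2nd order correction (step 6): average the slopes at both ends
        _d_prev = (_x_prev - _toy_denoiser(_x_prev, _sigma_prev)) / _sigma_prev
        _x_prev = _x + (_sigma_prev - _sigma) * 0.5 * (_d + _d_prev)
    _x = _x_prev
# with this denoiser, x shrinks proportionally to sigma, reaching 0 at sigma = 0
assert np.allclose(_x, 0.0)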
| 7 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 94 |
"""simple docstring"""
class FlowNetwork:
def __init__( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = None
_A = None
_A = graph
self._normalize_graph(_UpperCAmelCase , _UpperCAmelCase )
_A = len(_UpperCAmelCase )
_A = None
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_UpperCAmelCase ) > 1 or len(_UpperCAmelCase ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] ):
_A = algorithm(self )
class FlowNetworkAlgorithmExecutor:
    # `FlowNetwork` and `PushRelabelExecutor` leak through in the __main__ block
    # below; the intermediate class names here are descriptive reconstructions.
def __init__( self : List[Any] , _UpperCAmelCase : Union[str, Any] ):
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A = flow_network.graph
_A = False
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
self._algorithm()
_A = True
def lowerCAmelCase_ ( self : int ):
pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
def __init__( self : int , _UpperCAmelCase : Any ):
super().__init__(_UpperCAmelCase )
# use this to save your result
_A = -1
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
def __init__( self : Dict , _UpperCAmelCase : List[Any] ):
super().__init__(_UpperCAmelCase )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def lowerCAmelCase_ ( self : Dict ):
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(_UpperCAmelCase ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(_UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_UpperCAmelCase ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Any ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_UpperCAmelCase , _UpperCAmelCase )
self.relabel(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple ):
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int ):
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
| 7 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
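# Quick illustration (written for this note, not part of the file): with the
# lazy structure above, importing the package is cheap because the
# torch/flax/TF submodules are only resolved on first attribute access, e.g.:
#
#     import transformers.models.vision_text_dual_encoder as vtde
#     cfg_cls = vtde.VisionTextDualEncoderConfig  # the real import happens here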
| 641 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = SpeechTaTokenizer
UpperCAmelCase : Tuple = False
UpperCAmelCase : Optional[int] = True
def lowerCAmelCase_ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
_A = SpeechTaTokenizer(_UpperCAmelCase )
_A = AddedToken('<mask>' , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )
_A = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple ):
_A = 'this is a test'
_A = 'this is a test'
return input_text, output_text
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Dict=20 , _UpperCAmelCase : str=5 ):
_A , _A = self.get_input_output_texts(_UpperCAmelCase )
_A = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
_A = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
return text, ids
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = '<pad>'
_A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(_UpperCAmelCase ) , 81 )
def lowerCAmelCase_ ( self : Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase_ ( self : Any ):
_A = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = tokenizer.vocab_size
_A = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_A = ['aaaaa bbbbbb', 'cccccccccdddddddd']
_A = tokenizer.add_tokens(_UpperCAmelCase )
_A = tokenizer.vocab_size
_A = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , all_size + len(_UpperCAmelCase ) )
_A = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=_UpperCAmelCase )
self.assertGreaterEqual(len(_UpperCAmelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_A = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
_A = tokenizer.add_special_tokens(_UpperCAmelCase )
_A = tokenizer.vocab_size
_A = len(_UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , 0 )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , len(_UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , all_size_a + len(_UpperCAmelCase ) )
_A = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=_UpperCAmelCase )
self.assertGreaterEqual(len(_UpperCAmelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase_ ( self : str ):
pass
def lowerCAmelCase_ ( self : Any ):
pass
def lowerCAmelCase_ ( self : Dict ):
_A = self.get_tokenizer()
_A = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(_UpperCAmelCase , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
_A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_UpperCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
_A = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
# fmt: off
self.assertListEqual(_UpperCAmelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_A = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
# Use custom sequence because this tokenizer does not handle numbers.
_A = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
_A = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
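# Hedged usage sketch (not part of the test file; downloading the
# "microsoft/speecht5_asr" checkpoint is assumed). The tokenizer is
# character-level SentencePiece, with SPIECE_UNDERLINE marking word breaks:
#
#     from transformers import SpeechT5Tokenizer
#
#     tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#     print(tok.tokenize("this is a test"))  # roughly ['▁', 't', 'h', 'i', 's', '▁', ...]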
| 7 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
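# Usage sketch (written for this note, not part of the file; assumes the
# `datasets` package is installed):
#
#     from datasets import Dataset, Features
#     from datasets.features import Translation
#
#     features = Features({"translation": Translation(languages=["en", "fr"])})
#     ds = Dataset.from_dict(
#         {"translation": [{"en": "the cat", "fr": "le chat"}]}, features=features
#     )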
| 365 |
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 7 | 0 |
"""simple docstring"""
from math import sqrt
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = 0
for i in range(1 , int(sqrt(_snake_case ) + 1 ) ):
if n % i == 0 and i != sqrt(_snake_case ):
total += i + n // i
elif i == sqrt(_snake_case ):
total += i
return total - n
def UpperCamelCase (SCREAMING_SNAKE_CASE = 1_0000 ):
UpperCamelCase : Dict = sum(
i
for i in range(1 , _snake_case )
if sum_of_divisors(sum_of_divisors(_snake_case ) ) == i and sum_of_divisors(_snake_case ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
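# Worked example (grounded in the definitions above): 220 and 284 form the
# classic amicable pair, so both are counted by solution():
#
#     assert sum_of_divisors(220) == 284
#     assert sum_of_divisors(284) == 220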
| 102 |
"""simple docstring"""
import argparse
a = '''docs/source/_static/js/custom.js'''
def _snake_case ( _snake_case : Dict ) -> Any:
'''simple docstring'''
with open(_snake_case , encoding='utf-8' , newline='\n' ) as f:
_A = f.readlines()
_A = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
_A = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(_snake_case , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_snake_case )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
a = parser.parse_args()
update_custom_js(args.version)
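# For reference, a hypothetical fragment of the custom.js file this script
# rewrites (shape reconstructed from the markers searched for above, not the
# actual shipped file):
#
#     const stableVersion = "v4.28.0"
#     const versionMapping = {
#         "": "doc",
#         "v4.28.0": "v4.28.0",
#     }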
| 7 | 0 |
"""Train a masked language model on TPU."""
import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
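# Example invocation (paths and names are placeholders; the flags are the ones
# defined in parse_args above, and the script is assumed saved as run_mlm.py):
#
#     python run_mlm.py \
#         --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#         --tokenizer unigram-tokenizer-wikitext --pretrained_model_config roberta-base \
#         --output_dir ./mlm-checkpoints --tpu_name local --bfloat16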
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : int = '''vit_mae'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Optional[int]=3_072 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : List[Any]=1E-1_2 , _UpperCAmelCase : Optional[Any]=224 , _UpperCAmelCase : int=16 , _UpperCAmelCase : str=3 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : int=16 , _UpperCAmelCase : str=512 , _UpperCAmelCase : int=8 , _UpperCAmelCase : List[Any]=2_048 , _UpperCAmelCase : Optional[Any]=0.75 , _UpperCAmelCase : List[str]=False , **_UpperCAmelCase : Union[str, Any] , ):
super().__init__(**_UpperCAmelCase )
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = initializer_range
_A = layer_norm_eps
_A = image_size
_A = patch_size
_A = num_channels
_A = qkv_bias
_A = decoder_num_attention_heads
_A = decoder_hidden_size
_A = decoder_num_hidden_layers
_A = decoder_intermediate_size
_A = mask_ratio
_A = norm_pix_loss
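# Minimal usage sketch (assumes `transformers` with torch installed; this
# builds randomly initialised weights, not the pretrained checkpoint):
#
#     from transformers import ViTMAEConfig, ViTMAEModel
#
#     config = ViTMAEConfig(image_size=224, patch_size=16, mask_ratio=0.75)
#     model = ViTMAEModel(config)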
| 7 | 0 |
def hamming_distance(string1: str, string2: str) -> int:
    """Return the Hamming distance between two equal-length strings."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
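# Quick check (grounded in the definition above; the strings differ at the
# third, fourth, and fifth positions):
#
#     >>> hamming_distance("karolin", "kathrin")
#     3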
| 209 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
a = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def _snake_case ( _snake_case : Optional[Any] ) -> str:
'''simple docstring'''
_A = torch.load(_snake_case , map_location='cpu' )
return sd
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Tuple=rename_keys_prefix ) -> List[str]:
'''simple docstring'''
_A = OrderedDict()
_A = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
_A = key
for name_pair in rename_keys_prefix:
_A = new_key.replace(name_pair[0] , name_pair[1] )
_A = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
_A = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def _snake_case ( _snake_case : List[str] , _snake_case : Dict ) -> Dict:
'''simple docstring'''
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
_A = 'pretraining'
if "vcr" in checkpoint_path:
_A = {'visual_embedding_dim': 5_12}
elif "vqa_advanced" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48}
elif "vqa" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48}
elif "nlvr" in checkpoint_path:
_A = {'visual_embedding_dim': 10_24}
else:
raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
_A = {'visual_embedding_dim': 5_12}
_A = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48}
_A = 'vqa_advanced'
elif "vqa" in checkpoint_path:
_A = {'visual_embedding_dim': 20_48, 'num_labels': 31_29}
_A = 'vqa'
elif "nlvr" in checkpoint_path:
_A = {
'visual_embedding_dim': 10_24,
'num_labels': 2,
}
_A = 'nlvr'
_A = VisualBertConfig(**_snake_case )
# Load State Dict
_A = load_state_dict(_snake_case )
_A = get_new_dict(_snake_case , _snake_case )
if model_type == "pretraining":
_A = VisualBertForPreTraining(_snake_case )
elif model_type == "vqa":
_A = VisualBertForQuestionAnswering(_snake_case )
elif model_type == "nlvr":
_A = VisualBertForVisualReasoning(_snake_case )
elif model_type == "multichoice":
_A = VisualBertForMultipleChoice(_snake_case )
model.load_state_dict(_snake_case )
# Save Checkpoints
Path(_snake_case ).mkdir(exist_ok=_snake_case )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
a = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
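# Example invocation (the checkpoint file is assumed to be present locally;
# "this_script.py" is a placeholder for whatever the file is saved as, and the
# two arguments are the positional ones defined above):
#
#     python this_script.py vqa_pre_trained.th ./visualbert-vqa-pre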
| 7 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
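# Usage sketch (assumes `transformers` with torch installed; builds a randomly
# initialised model from this config, not the google/pegasus-large weights):
#
#     from transformers import PegasusConfig, PegasusForConditionalGeneration
#
#     config = PegasusConfig(d_model=512, encoder_layers=6, decoder_layers=6)
#     model = PegasusForConditionalGeneration(config)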
| 461 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _snake_case ( _snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
for param in module.parameters():
_A = False
def _snake_case ( ) -> Tuple:
'''simple docstring'''
_A = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_A = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def _snake_case ( _snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
_A = plt.imshow(_snake_case )
fig.axes.get_xaxis().set_visible(_snake_case )
fig.axes.get_yaxis().set_visible(_snake_case )
plt.show()
def _snake_case ( ) -> Optional[Any]:
'''simple docstring'''
_A = datetime.now()
_A = current_time.strftime('%H:%M:%S' )
return timestamp
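# Usage sketch for the helpers above (function names as restored here):
#
#     import torch.nn as nn
#
#     layer = nn.Linear(4, 4)
#     freeze_params(layer)
#     assert all(not p.requires_grad for p in layer.parameters())
#     print(get_device(), get_timestamp())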
| 7 | 0 |
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
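# Usage sketch (this is the CLIPSeg processor; downloading the
# "CIDAS/clipseg-rd64-refined" checkpoint is assumed):
#
#     from PIL import Image
#     from transformers import CLIPSegProcessor
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=Image.new("RGB", (352, 352)), return_tensors="pt")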
| 565 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = ['''image_processor''', '''tokenizer''']
UpperCAmelCase : Optional[int] = '''ViTImageProcessor'''
UpperCAmelCase : int = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Tuple , _UpperCAmelCase : int=None , _UpperCAmelCase : Tuple=None , **_UpperCAmelCase : Dict ):
_A = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
_A = kwargs.pop('feature_extractor' )
_A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : Optional[Any] , _UpperCAmelCase : int=None , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[int]=None , **_UpperCAmelCase : Union[str, Any] ):
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
_A = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None:
_A = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if images is not None:
_A = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None and images is not None:
_A = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_A = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_A = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def lowerCAmelCase_ ( self : str , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : Union[str, Any] ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def lowerCAmelCase_ ( self : Dict ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def lowerCAmelCase_ ( self : Tuple ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
| 7 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __magic_name__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
_lowerCAmelCase = DisjunctiveConstraint(_UpperCAmelCase )
self.assertTrue(isinstance(dc.token_ids , _UpperCAmelCase ) )
with self.assertRaises(_UpperCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_UpperCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_UpperCAmelCase ):
DisjunctiveConstraint(_UpperCAmelCase ) # fails here
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = [[1, 2, 3], [1, 2, 4]]
_lowerCAmelCase = DisjunctiveConstraint(_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(1 )
_lowerCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(_UpperCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(2 )
_lowerCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(_UpperCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(3 )
_lowerCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(_UpperCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_lowerCAmelCase = DisjunctiveConstraint(_UpperCAmelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 589 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def _snake_case ( _snake_case : int ) -> datetime:
'''simple docstring'''
_A = year % 19
_A = year % 4
_A = year % 7
_A = math.floor(year / 1_00 )
_A = math.floor((13 + 8 * leap_day_inhibits) / 25 )
_A = leap_day_inhibits / 4
_A = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
_A = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
_A = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
_A = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(_snake_case , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(_snake_case , 4 , 18 )
else:
return datetime(_snake_case , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
a = '''will be''' if year > datetime.now().year else '''was'''
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
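# Spot check against a known calendar fact: Easter 2000 fell on April 23, and
# working the formula above for year=2000 gives days_to_add=29 and
# days_from_phm_to_sunday=3, i.e. March 22 + 32 days:
#
#     assert gauss_easter(2000) == datetime(2000, 4, 23)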
| 7 | 0 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 488 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : int = '''gpt_bigcode'''
UpperCAmelCase : str = ['''past_key_values''']
UpperCAmelCase : Dict = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Tuple , _UpperCAmelCase : Dict=50_257 , _UpperCAmelCase : List[Any]=1_024 , _UpperCAmelCase : Any=768 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : str="gelu_pytorch_tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : List[Any]=1E-5 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[Any]=50_256 , _UpperCAmelCase : Dict=50_256 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Any=True , **_UpperCAmelCase : Any , ):
_A = vocab_size
_A = n_positions
_A = n_embd
_A = n_layer
_A = n_head
_A = n_inner
_A = activation_function
_A = resid_pdrop
_A = embd_pdrop
_A = attn_pdrop
_A = layer_norm_epsilon
_A = initializer_range
_A = scale_attn_weights
_A = use_cache
_A = attention_softmax_in_fpaa
_A = scale_attention_softmax_in_fpaa
_A = multi_query
_A = bos_token_id
_A = eos_token_id
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
| 7 | 0 |
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        """Creates a TFGPT2Tokenizer from an existing GPT2Tokenizer."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """Creates a TFGPT2Tokenizer from a pretrained GPT2Tokenizer."""
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Creates a TFGPT2Tokenizer from a configuration dictionary."""
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
"""simple docstring"""
def _snake_case ( _snake_case : str ) -> str:
'''simple docstring'''
return " ".join(
''.join(word[::-1] ) if len(_snake_case ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 7 | 0 |