"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
__UpperCamelCase : str = getLogger(__name__)
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ = 8 , A_ = 10_24 , A_="val" , A_=None , A_=False , A_="summarization" , A_=None , A_=1 , A_ = None , A_="" , **A_ , ):
lowerCAmelCase__ : Any = str(A_ )
assert local_rank is not None
torch.distributed.init_process_group(backend='''nccl''' , rank=A_ )
lowerCAmelCase__ : Union[str, Any] = Path(A_ )
lowerCAmelCase__ : Any = save_dir.joinpath(f'rank_{local_rank}_output.json' )
torch.cuda.set_device(A_ )
lowerCAmelCase__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(A_ ).cuda()
if fpaa:
lowerCAmelCase__ : int = model.half()
# determine if we need to increase num_beams
use_task_specific_params(A_ , A_ ) # update config with task specific params
lowerCAmelCase__ : Any = generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
lowerCAmelCase__ : Union[str, Any] = num_return_sequences
lowerCAmelCase__ : str = AutoTokenizer.from_pretrained(A_ )
logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
if max_source_length is None:
lowerCAmelCase__ : Optional[int] = tokenizer.model_max_length
if prefix is None:
lowerCAmelCase__ : str = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
lowerCAmelCase__ : List[Any] = SeqaSeqDataset(
A_ , A_ , A_ , max_target_length=10_24 , type_path=A_ , n_obs=A_ , prefix=A_ , **A_ , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
lowerCAmelCase__ : List[Any] = ds.make_sortish_sampler(A_ , distributed=A_ , add_extra_examples=A_ , shuffle=A_ )
lowerCAmelCase__ : List[str] = DataLoader(A_ , sampler=A_ , batch_size=A_ , collate_fn=ds.collate_fn )
lowerCAmelCase__ : List[str] = []
for batch in tqdm(A_ ):
lowerCAmelCase__ : Tuple = model.generate(
input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=A_ , num_beams=A_ , **A_ , )
lowerCAmelCase__ : Tuple = tokenizer.batch_decode(A_ , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ )
lowerCAmelCase__ : List[Any] = batch['''ids''']
if num_return_sequences > 1:
lowerCAmelCase__ : List[Any] = chunks(A_ , A_ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(A_ ):
results.append({'''pred''': pred, '''id''': ids[i].item()} )
save_json(A_ , A_ )
return results, sampler.num_replicas
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : List[Any] = argparse.ArgumentParser(
epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
parser.add_argument('''--data_dir''' , type=A_ , help='''like cnn_dm/test.source''' )
parser.add_argument(
'''--model_name''' , type=A_ , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , )
parser.add_argument('''--save_dir''' , type=A_ , help='''where to save''' , default='''tmp_gen''' )
parser.add_argument('''--max_source_length''' , type=A_ , default=A_ )
parser.add_argument(
'''--type_path''' , type=A_ , default='''test''' , help='''which subset to evaluate typically train/val/test''' )
parser.add_argument('''--task''' , type=A_ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=A_ , default=8 , required=A_ , help='''batch size''' )
parser.add_argument(
'''--local_rank''' , type=A_ , default=-1 , required=A_ , help='''should be passed by distributed.launch''' )
parser.add_argument(
'''--n_obs''' , type=A_ , default=A_ , required=A_ , help='''How many observations. Defaults to all.''' )
parser.add_argument(
'''--num_return_sequences''' , type=A_ , default=1 , required=A_ , help='''How many sequences to return''' )
parser.add_argument(
'''--sync_timeout''' , type=A_ , default=6_00 , required=A_ , help='''How long should master process wait for other processes to finish.''' , )
parser.add_argument('''--src_lang''' , type=A_ , default=A_ , required=A_ )
parser.add_argument('''--tgt_lang''' , type=A_ , default=A_ , required=A_ )
parser.add_argument(
'''--prefix''' , type=A_ , required=A_ , default=A_ , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--debug''' , action='''store_true''' )
lowerCAmelCase__ : List[Any] = time.time()
lowerCAmelCase__ ,lowerCAmelCase__ : Any = parser.parse_known_args()
lowerCAmelCase__ : List[Any] = parse_numeric_n_bool_cl_kwargs(A_ )
if generate_kwargs and args.local_rank <= 0:
print(f'parsed the following generate kwargs: {generate_kwargs}' )
lowerCAmelCase__ : Union[str, Any] = Path(args.save_dir + '''_tmp''' )
Path(A_ ).mkdir(exist_ok=A_ ) # this handles locking.
lowerCAmelCase__ : List[str] = list(json_save_dir.glob('''rank_*.json''' ) )
if intermediate_files:
raise ValueError(f'Found files at {json_save_dir} please move or remove them.' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
lowerCAmelCase__ : Any = {}
if args.src_lang is not None:
lowerCAmelCase__ : List[Any] = args.src_lang
if args.tgt_lang is not None:
lowerCAmelCase__ : Tuple = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Any = eval_data_dir(
args.data_dir , A_ , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=A_ , **A_ , )
if args.local_rank <= 0:
lowerCAmelCase__ : List[str] = Path(args.save_dir )
save_dir.mkdir(exist_ok=A_ )
lowerCAmelCase__ : Tuple = gather_results_from_each_node(A_ , A_ , args.sync_timeout )
lowerCAmelCase__ : str = combine_partial_results(A_ )
if args.num_return_sequences > 1:
lowerCAmelCase__ : Optional[int] = save_dir.joinpath('''pseudolabel_results.json''' )
print(f'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' )
save_json(A_ , A_ )
return
lowerCAmelCase__ : List[str] = Path(args.data_dir ).joinpath(args.type_path + '''.target''' )
with open(A_ ) as f:
lowerCAmelCase__ : Dict = [x.rstrip() for x in f.readlines()][: len(A_ )]
# Calculate metrics, save metrics, and save _generations.txt
lowerCAmelCase__ : Optional[Any] = '''translation''' in args.task
lowerCAmelCase__ : List[Any] = calculate_bleu if calc_bleu else calculate_rouge
lowerCAmelCase__ : str = '''bleu''' if calc_bleu else '''rouge'''
lowerCAmelCase__ : Dict = score_fn(A_ , A_ )
lowerCAmelCase__ : Union[str, Any] = len(A_ )
lowerCAmelCase__ : Dict = time.time() - start_time
lowerCAmelCase__ : str = round(runtime / metrics['''n_obs'''] , 4 )
lowerCAmelCase__ : List[Any] = num_replicas
# TODO(@stas00): add whatever metadata to metrics
lowerCAmelCase__ : str = save_dir.joinpath(f'{args.type_path}_{metric_name}.json' )
save_json(A_ , A_ , indent=A_ )
print(A_ )
write_txt_file(A_ , save_dir.joinpath(f'{args.type_path}_generations.txt' ) )
if args.debug:
write_txt_file(A_ , save_dir.joinpath(f'{args.type_path}.target' ) )
else:
shutil.rmtree(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : Dict = []
for partial_result in partial_results:
records.extend(A_ )
lowerCAmelCase__ : str = sorted(A_ , key=lambda A_ : x["id"] )
lowerCAmelCase__ : str = [x['''pred'''] for x in records]
return preds
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
# WAIT FOR lots of .json files
lowerCAmelCase__ : Union[str, Any] = time.time()
logger.info('''waiting for all nodes to finish''' )
lowerCAmelCase__ : Union[str, Any] = None
while (time.time() - start_wait) < timeout:
lowerCAmelCase__ : Union[str, Any] = list(save_dir.glob('''rank_*.json''' ) )
if len(A_ ) < num_replicas:
continue
try:
# make sure all json files are fully saved
lowerCAmelCase__ : int = lmap(A_ , A_ )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError('''Rank 0 gave up on waiting for other processes''' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
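# A hypothetical launch command (model name, data paths and GPU count are
# illustrative, not taken from the original script):
#
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --data_dir cnn_dm \
#       --type_path test --save_dir tmp_gen --bs 8 --fp16
#
# torch.distributed.launch injects --local_rank into each process, which is why
# eval_data_dir asserts that local_rank is not None before initializing NCCL.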
def excel_title_to_column(column_title: str) -> int:
    """
    Given a string column_title that represents the column title in an Excel
    sheet, return its column number (1-indexed).

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("B")
    2
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("Z")
    26
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.

        Args:
            parser: Root parser to register command-specific arguments.
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
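# Expected behavior of the demo above, derived by tracing the BFS by hand:
# shortest_path("D") returns "G->C->A->B->D", shortest_path("G") returns "G",
# and shortest_path("Foo") raises ValueError because "Foo" was never visited,
# so the final print call ends the demo with a traceback.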
"""simple docstring"""
from __future__ import annotations
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = []
create_all_state(1 , _lowerCAmelCase , _lowerCAmelCase , [] , _lowerCAmelCase )
return result
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_lowerCAmelCase , total_number - level + 2 ):
current_list.append(_lowerCAmelCase )
create_all_state(i + 1 , _lowerCAmelCase , level - 1 , _lowerCAmelCase , _lowerCAmelCase )
current_list.pop()
def lowercase (_lowerCAmelCase ):
for i in total_list:
print(*_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k)
print_all_state(total_list)
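# For n=4, k=2 the demo prints the six 2-element combinations of {1, 2, 3, 4},
# one per line: "1 2", "1 3", "1 4", "2 3", "2 4", "3 4".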
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase (_lowerCAmelCase , _lowerCAmelCase="shi-labs/oneformer_demo" ):
with open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) as f:
__lowerCAmelCase = json.load(_lowerCAmelCase )
__lowerCAmelCase = {}
__lowerCAmelCase = []
__lowerCAmelCase = []
for key, info in class_info.items():
__lowerCAmelCase = info["""name"""]
class_names.append(info["""name"""] )
if info["isthing"]:
thing_ids.append(int(_lowerCAmelCase ) )
__lowerCAmelCase = thing_ids
__lowerCAmelCase = class_names
return metadata
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=None , snake_case_=True , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=10 , snake_case_=False , snake_case_=255 , snake_case_="shi-labs/oneformer_demo" , snake_case_="ade20k_panoptic.json" , snake_case_=10 , ) -> Union[str, Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = min_resolution
__lowerCAmelCase = max_resolution
__lowerCAmelCase = do_resize
__lowerCAmelCase = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean
__lowerCAmelCase = image_std
__lowerCAmelCase = class_info_file
__lowerCAmelCase = prepare_metadata(snake_case_ , snake_case_ )
__lowerCAmelCase = num_text
__lowerCAmelCase = repo_path
# for the post_process_functions
__lowerCAmelCase = 2
__lowerCAmelCase = 10
__lowerCAmelCase = 10
__lowerCAmelCase = 3
__lowerCAmelCase = 4
__lowerCAmelCase = num_labels
__lowerCAmelCase = do_reduce_labels
__lowerCAmelCase = ignore_index
def A__ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def A__ ( self , snake_case_ , snake_case_=False ) -> Dict:
if not batched:
__lowerCAmelCase = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase = image.size
else:
__lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase = int(self.size["""shortest_edge"""] * h / w )
__lowerCAmelCase = self.size["""shortest_edge"""]
elif w > h:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = int(self.size["""shortest_edge"""] * w / h )
else:
__lowerCAmelCase = self.size["""shortest_edge"""]
__lowerCAmelCase = self.size["""shortest_edge"""]
else:
__lowerCAmelCase = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
__lowerCAmelCase = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
def A__ ( self ) -> Tuple:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_snake_case = image_processing_class
def A__ ( self ) -> str:
__lowerCAmelCase = OneFormerImageProcessorTester(self )
@property
def A__ ( self ) -> Dict:
return self.image_processing_tester.prepare_image_processor_dict()
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case_ , """image_std""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """ignore_index""" ) )
self.assertTrue(hasattr(snake_case_ , """class_info_file""" ) )
self.assertTrue(hasattr(snake_case_ , """num_text""" ) )
self.assertTrue(hasattr(snake_case_ , """repo_path""" ) )
self.assertTrue(hasattr(snake_case_ , """metadata""" ) )
self.assertTrue(hasattr(snake_case_ , """do_reduce_labels""" ) )
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Union[str, Any]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> List[str]:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ) -> Tuple:
# Initialize image_processor
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
__lowerCAmelCase = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase = self.image_processing_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self , snake_case_=False , snake_case_=False , snake_case_="np" ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCAmelCase = self.image_processing_tester.num_labels
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case_ )
if with_segmentation_maps:
__lowerCAmelCase = num_labels
if is_instance_map:
__lowerCAmelCase = list(range(snake_case_ ) ) * 2
__lowerCAmelCase = dict(enumerate(snake_case_ ) )
__lowerCAmelCase = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCAmelCase = [Image.fromarray(snake_case_ ) for annotation in annotations]
__lowerCAmelCase = image_processor(
snake_case_ , ["""semantic"""] * len(snake_case_ ) , snake_case_ , return_tensors="""pt""" , instance_id_to_semantic_id=snake_case_ , pad_and_return_pixel_mask=snake_case_ , )
return inputs
def A__ ( self ) -> List[str]:
pass
def A__ ( self ) -> Optional[Any]:
def common(snake_case_=False , snake_case_=None ):
__lowerCAmelCase = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case_ , is_instance_map=snake_case_ , segmentation_type=snake_case_ )
__lowerCAmelCase = inputs["""mask_labels"""]
__lowerCAmelCase = inputs["""class_labels"""]
__lowerCAmelCase = inputs["""pixel_values"""]
__lowerCAmelCase = inputs["""text_inputs"""]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case_ , snake_case_ , snake_case_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case_ )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
common(is_instance_map=snake_case_ , segmentation_type="""pil""" )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = np.zeros((20, 50) )
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = 1
__lowerCAmelCase = binary_mask_to_rle(snake_case_ )
self.assertEqual(len(snake_case_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = fature_extractor.post_process_semantic_segmentation(snake_case_ )
self.assertEqual(len(snake_case_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__lowerCAmelCase = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
__lowerCAmelCase = fature_extractor.post_process_semantic_segmentation(snake_case_ , target_sizes=snake_case_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_instance_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , )
__lowerCAmelCase = self.image_processing_tester.get_fake_oneformer_outputs()
__lowerCAmelCase = image_processor.post_process_panoptic_segmentation(snake_case_ , threshold=0 )
self.assertTrue(len(snake_case_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("""segmentation""" in el )
self.assertTrue("""segments_info""" in el )
self.assertEqual(type(el["""segments_info"""] ) , snake_case_ )
self.assertEqual(
el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
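# A sketch of what the _LazyModule indirection buys (illustrative, not part of
# the original file): importing the package stays cheap because each submodule
# in _import_structure is only imported when one of its attributes is first
# accessed. Accessing ResNetModel triggers the torch-backed modeling_resnet
# import, while ResNetConfig only needs configuration_resnet; names whose
# backend is unavailable are simply absent from _import_structure.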
import comet  # From: unbabel-comet
import torch

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
  author    = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
  title     = {Unbabel's Participation in the WMT20 Metrics Shared Task},
  booktitle = {Proceedings of the Fifth Conference on Machine Translation},
  month     = {November},
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  pages     = {909--918},
}
@inproceedings{rei-etal-2020-comet,
  title = "{COMET}: A Neural Framework for {MT} Evaluation",
  author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon",
  booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
  month = nov,
  year = "2020",
  address = "Online",
  publisher = "Association for Computational Linguistics",
  url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
  pages = "2685--2702",
}
"""

_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train
Machine Translation metrics that achieve high levels of correlation with different types of human judgments
(HTER, DA's or MQM). With the release of the framework the authors also released fully trained models that were
used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.

See the README.md file at https://unbabel.github.io/COMET/html/models.html for more information.
"""

_KWARGS_DESCRIPTION = """
COMET score.

Args:

`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.

Returns:
    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
    `scores`: List of scores.

Examples:

    >>> comet_metric = datasets.load_metric('comet')
    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da')  # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
"""simple docstring"""
from math import ceil
def __SCREAMING_SNAKE_CASE ( A_ = 10_01 ):
lowerCAmelCase__ : Union[str, Any] = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
lowerCAmelCase__ : int = 2 * i + 1
lowerCAmelCase__ : Any = 2 * i
lowerCAmelCase__ : Optional[Any] = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
__UpperCamelCase : Optional[Any] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
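# Hand check of the closed-form update: each ring i has corner values summing to
# 4*odd**2 - 6*even, so for n=5 the rings contribute 4*3**2 - 6*2 = 24 and
# 4*5**2 - 6*4 = 76, giving solution(5) == 1 + 24 + 76 == 101, which matches the
# 5x5 spiral diagonals 1, 3, 5, 7, 9, 13, 17, 21, 25.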
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Dict = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''CLIPFeatureExtractor''']
__UpperCamelCase : Optional[Any] = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integer lists, initialized only from integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints where one sequence is a complete subset of another,
        # since that makes the longer sequence unreachable.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
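# Outside these unit tests, a DisjunctiveConstraint is normally passed to
# generate() for constrained beam search; a minimal sketch with made-up token
# ids (model and input_ids are assumed to exist):
#
#   constraint = DisjunctiveConstraint([[5, 6], [5, 7]])
#   outputs = model.generate(input_ids, constraints=[constraint], num_beams=4)
#
# which forces the generated sequence to contain either [5, 6] or [5, 7].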
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
A = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def __A ( a_ :int) -> Optional[int]:
__a : Optional[Any] = ['''layers''', '''blocks''']
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase)
A = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def __A ( a_ :List[Any]) -> Optional[Any]:
__a : Any = list(s_dict.keys())
for key in keys:
__a : str = key
for k, v in WHISPER_MAPPING.items():
if k in key:
__a : Tuple = new_key.replace(__lowerCamelCase , __lowerCamelCase)
print(F"""{key} -> {new_key}""")
__a : Tuple = s_dict.pop(__lowerCamelCase)
return s_dict
def __A ( a_ :str) -> Union[str, Any]:
__a , __a : List[str] = emb.weight.shape
__a : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase)
__a : List[Any] = emb.weight.data
return lin_layer
def __A ( a_ :int , a_ :int) -> bytes:
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase)
__a : int = os.path.basename(__lowerCamelCase)
__a : List[str] = url.split('''/''')[-2]
__a : List[str] = os.path.join(__lowerCamelCase , __lowerCamelCase)
if os.path.exists(__lowerCamelCase) and not os.path.isfile(__lowerCamelCase):
raise RuntimeError(F"""{download_target} exists and is not a regular file""")
if os.path.isfile(__lowerCamelCase):
__a : Union[str, Any] = open(__lowerCamelCase , '''rb''').read()
if hashlib.shaaaa(__lowerCamelCase).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""")
with urllib.request.urlopen(__lowerCamelCase) as source, open(__lowerCamelCase , '''wb''') as output:
with tqdm(
total=int(source.info().get('''Content-Length''')) , ncols=80 , unit='''iB''' , unit_scale=__lowerCamelCase , unit_divisor=10_24) as loop:
while True:
__a : Optional[int] = source.read(81_92)
if not buffer:
break
output.write(__lowerCamelCase)
loop.update(len(__lowerCamelCase))
__a : Tuple = open(__lowerCamelCase , '''rb''').read()
if hashlib.shaaaa(__lowerCamelCase).hexdigest() != expected_shaaaa:
raise RuntimeError(
'''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''')
return model_bytes
def __A ( a_ :int , a_ :Union[str, Any]) -> str:
if ".pt" not in checkpoint_path:
__a : Optional[Any] = _download(_MODELS[checkpoint_path])
else:
__a : Union[str, Any] = torch.load(__lowerCamelCase , map_location='''cpu''')
__a : List[Any] = original_checkpoint['''dims''']
__a : List[Any] = original_checkpoint['''model_state_dict''']
__a : Dict = state_dict['''decoder.token_embedding.weight''']
remove_ignore_keys_(__lowerCamelCase)
rename_keys(__lowerCamelCase)
__a : Union[str, Any] = True
__a : str = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
__a : Optional[int] = WhisperConfig(
vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=__lowerCamelCase , decoder_ffn_dim=__lowerCamelCase , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
__a : int = WhisperForConditionalGeneration(__lowerCamelCase)
__a , __a : Union[str, Any] = model.model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase)
if len(__lowerCamelCase) > 0 and not set(__lowerCamelCase) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
F""" but all the following weights are missing {missing}""")
if tie_embeds:
__a : List[Any] = make_linear_from_emb(model.model.decoder.embed_tokens)
else:
__a : Optional[Any] = proj_out_weights
model.save_pretrained(__lowerCamelCase)
if __name__ == "__main__":
A = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
A = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path) | 160 |
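# A hypothetical invocation, assuming this script is saved as
# convert_openai_to_hf.py (the output path is a placeholder):
#
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny
#
# A bare model name like "tiny" is resolved through _MODELS and downloaded;
# passing a local *.pt file as --checkpoint_path skips the download branch.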
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : Any = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
__UpperCamelCase : Dict = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def __A ( __lowerCamelCase ) -> List[str]:
a = {}
with open(__lowerCamelCase , """r""" ) as file:
for line_number, line in enumerate(__lowerCamelCase ):
a = line.strip()
if line:
a = line.split()
a = line_number
a = words[0]
a = value
return result
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
for attribute in key.split(""".""" ):
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
a = PARAM_MAPPING[full_name.split(""".""" )[-1]]
a = """param"""
if weight_type is not None and weight_type != "param":
a = getattr(__lowerCamelCase , __lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
a = hf_pointer
for attribute in hf_param_name.split(""".""" ):
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = shape_pointer.shape
# let's reduce dimension
a = value[0]
else:
a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
a = value
elif weight_type == "weight_g":
a = value
elif weight_type == "weight_v":
a = value
elif weight_type == "bias":
a = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
a = getattr(__lowerCamelCase , __lowerCamelCase )
a = value
else:
a = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
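# Hypothetical invocation sketch (paths and file names are placeholders, not real files):
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./wav2vec_small_960h.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h-converted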
| 228 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
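# Usage sketch: with this conftest on the path, test runs can request per-run reports via
# the shared option wired in above, e.g. `pytest --make-reports=my_run tests/`
# (the report id "my_run" is an arbitrary example string).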
| 111 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
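# A small usage sketch (assumes the classes above are importable as shown):
#
#   config = DebertaV2Config()            # defaults mirror deberta-v2-xlarge
#   onnx_config = DebertaV2OnnxConfig(config)
#   print(onnx_config.inputs)             # dynamic axes for input_ids / attention_mask
#
# With type_vocab_size == 0 (the default above), token_type_ids are omitted from both
# the declared inputs and the generated dummy inputs.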
| 111 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
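# Usage sketch (hypothetical vocab path): the tokenizer splits text into single
# characters, so "abc" -> ["a", "b", "c"] before id lookup.
#
#   tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
#   ids = tokenizer("abc")["input_ids"]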
| 330 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}

            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
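# Rough usage sketch inside an Accelerate training loop (names are illustrative; the
# plugin lives at accelerator.state.fsdp_plugin when FSDP is enabled):
#
#   save_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(accelerator.state.fsdp_plugin, accelerator, optimizer, model, "ckpt")
#
# In practice these helpers are invoked for you by `accelerator.save_state(...)` and
# `accelerator.load_state(...)`.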
| 327 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 2 | """simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
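# Hypothetical invocation sketch (paths and file names are placeholders):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type ddim --extract_ema \
#       --dump_path ./stable-diffusion-v1-5-diffusers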
| 2 | 1 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
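# Expected output sketch for the ['A', 'B', 'C'] run above (subsequences are printed
# depth-first, excluding each element before including it), beginning:
#   []
#   ['C']
#   ['B']
#   ['B', 'C']
#   ...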
| 333 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Stores the first and second signal and performs their circular convolution.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
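# Sanity-check sketch (hand-computed): for the default signals above, [2, 1, 2, -1]
# circularly convolved with [1, 2, 3, 4] gives:
#   CircularConvolution().circular_convolution()  ->  [10.0, 10.0, 6.0, 14.0]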
| 333 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
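# Usage sketch (the model id is the real checkpoint referenced in the map above):
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer("Crime and Punishment", return_tensors="pt")["input_ids"]
# Decoding routes pieces through sentencepiece except for special tokens, per
# convert_tokens_to_string above.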
| 70 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
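# Behavior sketch: once the lazy module is installed in sys.modules, an import such as
#   from transformers import FunnelConfig
# resolves "configuration_funnel" on first attribute access instead of eagerly importing
# every backend; missing optional dependencies only fail when their symbols are used.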
| 70 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 7 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 7 | 1 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Returns all left- and right-truncations of n, including n itself."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Quick pre-filter: long candidates must start and end in 3-digit primes."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Returns the first `count` two-sided (truncatable) primes."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Returns the sum of the only eleven primes that are truncatable from both sides."""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(11)) = }''')
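# For reference, the eleven two-sided truncatable primes are
# 23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797, 739397, so solution() == 748317
# (Project Euler problem 37).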
| 25 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 25 | 1 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)

        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)

        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)

        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)

        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)

        if args.recalibrate_weights:
            recalibrate_weights(model)

        if args.fuse_qkv:
            fuse_qkv(model, args)

        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_input_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all "*_input_quantizer" modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges so that query, key, and value share one scale factor."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip the amax of GELU output quantizers to maxval."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Recalibrate the weight amaxes under the current quantization granularity."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print one line per quantized layer, listing its input and weight quantizer configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    max_name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        max_name_width = max(max_name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{max_name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{max_name_width}} {act_str}")
            logger.info(f"{' ':{max_name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k of the named quantizer submodule of mod to value v."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set attributes on mod's input and/or weight quantizers."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set attributes of every quantizer whose module name matches one of the given regexes."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
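
# A minimal end-to-end sketch of how the helpers above are meant to be wired
# together (kept as comments; `build_model` and `calib_loader` are hypothetical
# stand-ins for the caller's model and calibration data, not part of this file):
#
#     import argparse
#
#     parser = argparse.ArgumentParser()
#     add_arguments(parser)
#     args = parser.parse_args(["--calibrator", "percentile", "--percentile", "99.99"])
#     set_default_quantizers(args)  # must run before QuantLinear layers are created
#     model = build_model()
#     configure_model(model, args, calib=True)
#     enable_calibration(model)
#     for batch in calib_loader:
#         model(**batch)
#     finish_calibration(model, args)
#     configure_model(model, args)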
| 245 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
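
# For example (a sketch of the intended behavior): with dims=(2, 3) the flat
# indices 0..5 map row-major onto the grid, so _flat_idx_to_idx(4, (2, 3))
# returns (1, 1) and _flat_idx_to_idx(5, (2, 3)) returns (1, 2).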
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])


def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
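
# A minimal usage sketch (an assumed caller, not part of this module): the toy
# "layer" below just adds two tensors, and the two leading dimensions are
# treated as batch dims to be flattened, chunked, and reassembled.
#
#     def toy_layer(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
#         return a + b
#
#     a, b = torch.randn(4, 8, 16), torch.randn(4, 8, 16)
#     out = chunk_layer(toy_layer, {"a": a, "b": b}, chunk_size=5, no_batch_dims=2)
#     assert torch.allclose(out, a + b)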
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 245 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/rembert": 256,
}
SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
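
# A short sketch of the special-token layout this class produces (assuming the
# "google/rembert" checkpoint is available; the ids below are placeholders):
#
#     tok = RemBertTokenizerFast.from_pretrained("google/rembert")
#     tok.build_inputs_with_special_tokens([10, 11], [20, 21])
#     # -> [cls_id, 10, 11, sep_id, 20, 21, sep_id]
#     tok.create_token_type_ids_from_sequences([10, 11], [20, 21])
#     # -> [0, 0, 0, 0, 1, 1, 1]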
| 371 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_bar function, which defines the cumulative product of
    (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
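
# Quick sanity sketch of the schedule above (an illustrative check, not part of
# the library): all betas stay within (0, max_beta].
#
#     betas = betas_for_alpha_bar(10)
#     assert betas.shape == (10,) and float(betas.max()) <= 0.999 and float(betas.min()) > 0.0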
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
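
# A minimal denoising-loop sketch (an assumption about the caller, not part of
# this file) showing how diffusers schedulers of this kind are usually driven;
# `unet` is a hypothetical noise-prediction model:
#
#     scheduler = KDPM2DiscreteScheduler()
#     scheduler.set_timesteps(25, device="cpu")
#     sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample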
| 71 | 0 |
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedily select a maximum set of mutually non-overlapping activities.

    Assumes the activities are sorted by finish time in ascending order.
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 177 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1_0_2_4,
"gpt2-medium": 1_0_2_4,
"gpt2-large": 1_0_2_4,
"gpt2-xl": 1_0_2_4,
"distilgpt2": 1_0_2_4,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
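
# A short usage sketch (assuming the standard "gpt2" checkpoint is reachable):
# with add_prefix_space=True the tokenizer accepts pretokenized input, encoding
# each word as if it were preceded by a space.
#
#     tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#     enc = tok(["Hello", "world"], is_split_into_words=True)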
| 316 | 0 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many dice rolls produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies
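
# For instance (a quick check of the helper above): with two 4-sided dice the
# totals 2..8 occur with frequencies 1, 2, 3, 4, 3, 2, 1:
#
#     assert total_frequency_distribution(sides_number=4, dice_number=2)[2:] == [1, 2, 3, 4, 3, 2, 1]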
def solution() -> float:
    """Return the probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 364 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )

    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 331 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text

    @unittest.skip('MGP-STR always lower cases letters.')
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = '[SPECIAL_TOKEN]'

                tokenizer.add_special_tokens({'cls_token': special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_a)

                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a), 0)

                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a, str)
                self.assertEqual(text_a.replace(' ', ''), output_text)

    @unittest.skip('MGP-STR tokenizer only handles one sequence.')
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
    def test_pretokenized_inputs(self):
        pass
| 2 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
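# Lazy import structure: the public names are declared up front, and the heavy
# torch/TF implementations are only imported when the corresponding backend is installed.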
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 | 1 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 369 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
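# Invariant maintained throughout: in a non-empty list, ``self.tail.next`` is ``self.head``.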
class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """
    Test cases for the CircularLinkedList class.
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 164 | 0 |
import math
from datetime import datetime, timedelta
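# Gauss's Easter algorithm: Easter Sunday is derived purely from modular
# arithmetic on the year, correcting for the drift between the lunar month
# and the Gregorian solar calendar.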
def gauss_easter(year: int) -> datetime:
    """
    Calculate the Gregorian Easter date for a given year using Gauss's algorithm.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 279 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
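# Module-level RNG shared by the input helpers below, so the generated test
# inputs come from a single seedable source.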
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32-like nested list with the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_a = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_b = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_a, encoded_sequences_b, atol=1e-3))

        # Test batched
        encoded_sequences_a = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_b = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a, encoded_sequences_b):
            self.assertTrue(np.allclose(enc_seq_a, enc_seq_b, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_a = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_b = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a, encoded_sequences_b):
            self.assertTrue(np.allclose(enc_seq_a, enc_seq_b, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_a = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_b = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a, encoded_sequences_b):
            self.assertTrue(np.allclose(enc_seq_a, enc_seq_b, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 279 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
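# The tester below builds tiny configs and inputs; ModelTesterMixin drives the
# shared model checks against them.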
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 370 |
"""simple docstring"""
import numpy
# List of input, output pairs
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
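# The hypothesis is linear: h(x) = theta_0 + theta_1*x_1 + theta_2*x_2 + theta_3*x_3,
# with parameter_vector holding (theta_0, theta_1, theta_2, theta_3); gradient
# descent below fits these parameters to the training pairs.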
def _error(example_no, data_set="train"):
    """
    Difference between the hypothesis value and the actual output for the
    given example.
    """
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """
    Hypothesis value for a given input tuple: theta_0 plus the weighted inputs.
    """
    hyp_val = 0
    for i in range(len(data_input_tuple) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """
    Actual output for the example at `example_no` in the chosen data set.
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """
    Hypothesis value for the example at `example_no` in the chosen data set.
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """
    Summation term of the cost-function derivative for parameter `index`
    (index == -1 corresponds to the bias parameter).
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """
    Derivative of the cost function with respect to parameter `index`.
    """
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 166 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
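# A task template canonicalizes dataset columns: `column_mapping` tells
# `Dataset.prepare_for_task` how to rename the dataset's columns to the
# schema declared below.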
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
| 196 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
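# UperNet (Xiao et al., 2018) pairs a vision backbone with a pyramid pooling
# decode head and an optional auxiliary FCN head for semantic segmentation.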
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        # register each stage so its parameters are tracked by the module
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)

        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        # upsample the segmentation logits back to the input resolution
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 120 | 0 |
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate an image by warping it with the affine transform between two point sets."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 284 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
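# Shared helpers for the DeepFloyd IF pipeline tests: tiny dummy components
# keep the (otherwise slow) pipelines testable.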
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,  # assumption: IF enables dynamic thresholding
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,  # assumption: IF enables dynamic thresholding
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
def UpperCamelCase ( self ):
_snake_case : List[Any] = self.get_dummy_components()
_snake_case : List[Any] = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
_snake_case : Union[str, Any] = self.get_dummy_inputs(lowercase_ )
_snake_case : Union[str, Any] = inputs["prompt"]
_snake_case : Dict = inputs["generator"]
_snake_case : Any = inputs["num_inference_steps"]
_snake_case : Union[str, Any] = inputs["output_type"]
if "image" in inputs:
_snake_case : int = inputs["image"]
else:
_snake_case : Union[str, Any] = None
if "mask_image" in inputs:
_snake_case : int = inputs["mask_image"]
else:
_snake_case : List[str] = None
if "original_image" in inputs:
_snake_case : Tuple = inputs["original_image"]
else:
_snake_case : Any = None
_snake_case ,_snake_case : Optional[int] = pipe.encode_prompt(lowercase_ )
# inputs with prompt converted to embeddings
_snake_case : Dict = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
_snake_case : int = image
if mask_image is not None:
_snake_case : int = mask_image
if original_image is not None:
_snake_case : Any = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowercase_ , lowercase_ , lowercase_ )
_snake_case : Optional[int] = pipe(**lowercase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowercase_ )
_snake_case : Any = self.pipeline_class.from_pretrained(lowercase_ )
pipe_loaded.to(lowercase_ )
pipe_loaded.set_progress_bar_config(disable=lowercase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowercase_ , lowercase_ ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
_snake_case : int = self.get_dummy_inputs(lowercase_ )
_snake_case : Optional[int] = inputs["generator"]
_snake_case : List[Any] = inputs["num_inference_steps"]
_snake_case : Tuple = inputs["output_type"]
# inputs with prompt converted to embeddings
_snake_case : int = {
"prompt_embeds": prompt_embeds,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": num_inference_steps,
"output_type": output_type,
}
if image is not None:
_snake_case : int = image
if mask_image is not None:
_snake_case : str = mask_image
if original_image is not None:
_snake_case : int = original_image
_snake_case : Optional[Any] = pipe_loaded(**lowercase_ )[0]
_snake_case : Dict = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
self.assertLess(lowercase_ , 1e-4 )
def UpperCamelCase ( self ):
_snake_case : Tuple = self.get_dummy_components()
_snake_case : Any = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
_snake_case : int = self.get_dummy_inputs(lowercase_ )
_snake_case : List[str] = pipe(**lowercase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowercase_ )
_snake_case : List[Any] = self.pipeline_class.from_pretrained(lowercase_ )
pipe_loaded.to(lowercase_ )
pipe_loaded.set_progress_bar_config(disable=lowercase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
_snake_case : Optional[Any] = self.get_dummy_inputs(lowercase_ )
_snake_case : int = pipe_loaded(**lowercase_ )[0]
_snake_case : Tuple = np.abs(to_np(lowercase_ ) - to_np(lowercase_ ) ).max()
self.assertLess(lowercase_ , 1e-4 ) | 284 | 1 |
"""
Return the least number of perfect squares that sum to a given natural
number (by Lagrange's four-square theorem the answer is at most four).
"""
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
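# Quick sanity check (illustrative, not part of the original module):
# 12 = 4 + 4 + 4 and no pair of squares sums to 12, so the minimum is three.
# assert minimum_squares_to_represent_a_number(12) == 3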
"""Primality check with unit tests."""
import math
import unittest


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
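
# Why stepping by 6 is enough: every integer can be written as 6k + r with
# r in {0, 1, 2, 3, 4, 5}. The cases r = 0, 2, 4 are even and r = 3 is a
# multiple of 3, so any prime greater than 3 must have the form 6k - 1 or
# 6k + 1. Testing i and i + 2 for i = 5, 11, 17, ... covers exactly those
# candidates up to sqrt(number).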

class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
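# Typical invocations once the package is installed (illustrative, assuming
# the standard `accelerate` console entry point is wired to main()):
#   accelerate config
#   accelerate launch my_training_script.py --some_arg value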
"""Convert Latent Diffusion (LDM) UNet checkpoints to the diffusers format."""
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Applies the locally computed renamings plus global replacements, splitting attention qkv layers on the way."""
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config
            )

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )

    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
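
# Illustration of the renaming helpers above (hypothetical key, not taken
# from a real checkpoint):
#   renew_resnet_paths(["input_blocks.1.0.in_layers.0.weight"])
#   -> [{"old": "input_blocks.1.0.in_layers.0.weight",
#        "new": "input_blocks.1.0.norm1.weight"}]
# assign_to_checkpoint() then applies the `meta_path` replacement, turning
# the "input_blocks.1.0" prefix into "down_blocks.0.resnets.0".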
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """
    Cleans a section of the table of content of the documentation by removing
    duplicates and sorting entries alphabetically, keeping "Overview" first.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
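
# Illustration of clean_doc_toc on hypothetical entries: duplicates are
# collapsed, remaining titles are sorted, and the overview entry leads.
#   clean_doc_toc([
#       {"local": "api/ddim", "title": "DDIM"},
#       {"local": "overview", "title": "Overview"},
#       {"local": "api/ddim", "title": "DDIM"},
#   ])
#   -> [{"local": "overview", "title": "Overview"},
#       {"local": "api/ddim", "title": "DDIM"}]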
from __future__ import annotations

import math


def prime_sieve(n: int) -> list[int]:
    """Return all primes below `n` using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
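
# For example, prime_sieve(10) returns [2, 3, 5, 7].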

def solution(limit: int = 999_966_663_333) -> int:
    """
    Project Euler Problem 234: compute the sum of all semidivisible numbers
    not exceeding `limit`, i.e. numbers divisible by exactly one of lps(n)
    (largest prime <= sqrt(n)) and ups(n) (smallest prime > sqrt(n)).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
from __future__ import annotations

from collections import deque


class Automaton:
    """Aho-Corasick automaton for multi-keyword string search."""

    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """
        >>> A = Automaton(["what", "hat", "ver", "er"])
        >>> A.search_in("whatever, err ... , wherever")
        {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
        """
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
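
# Informal complexity note: building the trie is linear in the total length
# of the keywords; search_in() follows goto and fail transitions, so its
# running time is roughly proportional to len(string) plus the number of
# reported matches (times the linear child scan inside find_next_state).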
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__lowercase = logging.get_logger(__name__)
class _A ( _a ):
"""simple docstring"""
def __init__( self : List[str] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : List[Any]):
warnings.warn(
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use MobileViTImageProcessor instead." , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__A = spec.loader.load_module()
__A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__A = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def UpperCamelCase__ ( ):
snake_case : Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
snake_case : Tuple = False
# source code of `config_class`
snake_case : Tuple = inspect.getsource(lowercase__ )
snake_case : Optional[int] = _re_checkpoint.findall(lowercase__ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
snake_case , snake_case : str = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
snake_case : Optional[int] = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
snake_case : Any = True
break
snake_case : Optional[Any] = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowercase__ )
if len(lowercase__ ) > 0:
snake_case : Optional[Any] = "\n".join(sorted(lowercase__ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
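
# Example of the markdown checkpoint reference the regex above matches:
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]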
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of `function` in the interval [a, b] using bisection."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
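
# Bisection halves the bracketing interval each iteration, so reaching the
# 1e-7 tolerance from an interval of width 999 takes about
# log2(999 / 1e-7), i.e. roughly 34 iterations. For f(x) = x**3 - 2*x - 5
# it converges to the root near x = 2.0945515.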
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}


CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """

@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
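
# Minimal usage sketch (mirrors the documented DPR reader workflow; requires
# downloading the "facebook/dpr-reader-single-nq-base" checkpoint):
#
#   from transformers import DPRReader, DPRReaderTokenizer
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded_inputs)
#   predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)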
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class _a ( _lowercase):
_a : Dict = '''summarization'''
_a : int = ['''loss''']
_a : Optional[Any] = ROUGE_KEYS
_a : Any = '''rouge2'''
def __init__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : Dict )-> int:
if hparams.sortish_sampler and hparams.gpus > 1:
lowerCAmelCase__ : int = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(_SCREAMING_SNAKE_CASE , num_labels=_SCREAMING_SNAKE_CASE , mode=self.mode , **_SCREAMING_SNAKE_CASE )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
lowerCAmelCase__ : List[Any] = Path(self.output_dir ) / '''metrics.json'''
lowerCAmelCase__ : int = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : List[Any] = defaultdict(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = self.config.model_type
lowerCAmelCase__ : Any = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
lowerCAmelCase__ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
lowerCAmelCase__ : List[Any] = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
lowerCAmelCase__ : Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowerCAmelCase__ : Tuple = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
lowerCAmelCase__ : List[str] = get_git_info()['''repo_sha''']
lowerCAmelCase__ : int = hparams.num_workers
lowerCAmelCase__ : Optional[int] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowerCAmelCase__ : List[Any] = self.decoder_start_token_id
lowerCAmelCase__ : int = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : List[str] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowerCAmelCase__ : int = self.hparams.eval_max_gen_length
else:
lowerCAmelCase__ : Tuple = self.model.config.max_length
lowerCAmelCase__ : List[str] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : Dict[str, torch.Tensor] )-> Dict[str, List[str]]:
lowerCAmelCase__ : Any = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_SCREAMING_SNAKE_CASE , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
lowerCAmelCase__ : Union[str, Any] = True
return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
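    # Note on the label-smoothed branch above: label_smoothed_nll_loss blends the usual NLL with
    # a uniform prior over the vocabulary, roughly loss = (1 - eps) * nll + eps * smooth_loss,
    # which keeps the model from becoming over-confident about single tokens during fine-tuning.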
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = {name: loss.detach() for name, loss in zip(self.loss_names, loss_tensors)}
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
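    # Typical invocation (paths and sizes are illustrative, not prescribed by this script):
    # python finetune.py --data_dir cnn_dm --output_dir ./summ_out \
    #     --max_source_length 1024 --max_target_length 56 --n_val 500 --do_predict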
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
lowerCamelCase = pl.Trainer.add_argparse_args(parser)
lowerCamelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
lowerCamelCase = parser.parse_args()
main(args)
| 131 | 1 |
'''simple docstring'''
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main():
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)
if __name__ == "__main__":
main()
| 123 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
    title = {Optimizing Statistical Machine Translation for Text Simplification},
    authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
    journal = {Transactions of the Association for Computational Linguistics},
    volume = {4},
    year={2016},
    url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
},
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score

Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
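# Worked note: SARIsent averages each of the keep/delete/add scores over n-gram orders 1..4,
# then takes the mean of the three averages; e.g. per-operation averages of 60.0, 30.0 and 9.0
# give a sentence-level SARI of (60.0 + 30.0 + 9.0) / 3 = 33.0.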
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 123 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
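# Note: with this pattern, importing the package stays cheap. The torch-backed classes listed
# in `_import_structure` are only materialized by `_LazyModule` on first attribute access,
# while the TYPE_CHECKING branch above keeps static type checkers aware of the real symbols.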
| 144 |
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.

        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

_CITATION = """
@article{scikit-learn,
    title={Scikit-learn: Machine Learning in {P}ython},
    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
           and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
           and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
           Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
    journal={Journal of Machine Learning Research},
    volume={12},
    pages={2825--2830},
    year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 144 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for simple sequence classification."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        """This will be superseded by a framework-agnostic approach soon."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        """This will be superseded by a framework-agnostic approach soon."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            # The concrete dtypes below (int32 features, int64 label) follow the upstream HANS
            # example; the obfuscated source only showed a garbled `tf.intaa` placeholder.
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
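# Usage sketch (the data path is hypothetical; HANS ships as heuristics_*.txt TSV files):
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# train_dataset = HansDataset("/path/to/hans", tokenizer, task="hans", max_seq_length=128)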
| 360 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
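# Usage sketch (the checkpoint id and inputs are illustrative):
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")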
| 167 | 0 |
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
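# Usage sketch: instantiating the config with no arguments mirrors the sijunhe/nezha-cn-base
# architecture; individual fields can be overridden as keyword arguments, e.g.
# config = NezhaConfig(hidden_dropout_prob=0.2)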
| 251 |
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # The destination keys follow DistilBERT's parameter naming (q_lin/k_lin/v_lin, ffn.lin1/lin2,
    # etc.), as in the upstream extraction script; the obfuscated source had dropped them.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
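    # Example invocation (the dump path is just the argparse default shown above):
    # python extract_distilbert.py --model_type bert --model_name bert-base-uncased \
    #     --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth --vocab_transform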
| 251 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
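# Shape arithmetic used by the tester below: with image_size=30 and patch_size=2 the encoder
# sees (30 // 2) ** 2 = 225 patches, so the sequence length is 225 + 1 = 226 including [CLS].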
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass
    def test_model_common_attributes(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 56 |
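# Stand-alone inference sketch mirroring the integration test above. The
# checkpoint name comes from the test itself; the helper function below is
# illustrative and not part of the original test file.
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

def vit_msn_top_label(image_path: str) -> int:
    processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
    model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000), as asserted in the test
    return int(logits.argmax(-1).item())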
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    '''simple docstring'''
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 56 | 1 |
def heaps(arr: list):
    """Generate all permutations of `arr` with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res

if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 92 |
'''simple docstring'''
class Graph:
    """simple docstring"""
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            # drop the mirrored duplicate of each undirected edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """simple docstring"""
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
| 215 | 0 |
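# Usage sketch for the Borůvka implementation above. It assumes the repaired
# `Graph` class (with the reconstructed `boruvka_mst` name) is in scope; the
# edge list is illustrative and uses distinct weights so the cheapest-edge
# choice per component is unambiguous.
def boruvka_demo() -> None:
    g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4)])
    mst = Graph.boruvka_mst(g)
    print(mst)  # prints the MST edges in "head -> tail == weight" form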
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        """simple docstring"""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 371 |
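# Illustrative check of the rope-scaling validation above (a sketch; it
# assumes the repaired LlamaConfig from the previous snippet is in scope).
# A well-formed dict passes silently; an unknown strategy name raises.
def rope_scaling_demo() -> None:
    LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
    try:
        LlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})
    except ValueError as err:
        print(f"rejected as expected: {err}")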
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs, ):
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 91 | 0 |
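# Behavioral sketch of the `add_prefix_space` guard asserted in the class
# above, using the public GPT-2 checkpoint (the printed ids are illustrative).
from transformers import GPT2TokenizerFast

def prefix_space_demo() -> None:
    tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
    # pretokenized input is only legal when add_prefix_space=True
    enc = tok(["Hello", "world"], is_split_into_words=True)
    print(enc.input_ids)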
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 300 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        '''simple docstring'''
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs):
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 300 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''', '''prajjwal1/bert-tiny''')
        tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''')
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset('''cnn_dailymail''', '''3.0.0''', split='''train[:1%]''')
        val_dataset = datasets.load_dataset('''cnn_dailymail''', '''3.0.0''', split='''validation[:1%]''')
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['''article'''], padding='''max_length''', truncation=True, max_length=512)
            outputs = tokenizer(batch['''highlights'''], padding='''max_length''', truncation=True, max_length=128)
            batch['''input_ids'''] = inputs.input_ids
            batch['''attention_mask'''] = inputs.attention_mask
            batch['''decoder_input_ids'''] = outputs.input_ids
            batch['''labels'''] = outputs.input_ids.copy()
            batch['''labels'''] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
            ]
            batch['''decoder_attention_mask'''] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=['''article''', '''highlights'''],)
        train_dataset.set_format(
            type='''torch''', columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''],)
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=['''article''', '''highlights'''],)
        val_dataset.set_format(
            type='''torch''', columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''],)
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy='''steps''', do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2,)
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer,)
        # start training
        trainer.train()
| 39 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__).parent.joinpath(filename).read_text(encoding='''utf-8''')
    ciphertext = [int(number) for number in data.strip().split(''',''')]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"{solution() = }")
| 39 | 1 |
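# Tiny round-trip demo of the cycled-XOR scheme the solver above attacks
# (self-contained; the key "abc" is illustrative).
from itertools import cycle

def xor_roundtrip_demo() -> None:
    plaintext = "hello world"
    key = [ord(c) for c in "abc"]
    encrypted = [ord(c) ^ k for c, k in zip(plaintext, cycle(key))]
    decrypted = "".join(chr(c ^ k) for c, k in zip(encrypted, cycle(key)))
    assert decrypted == plaintext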
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 28 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = """trajectory_transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 275 | 0 |
import json
import sys
def format_json_to_md(input_json_file, output_md_file) -> str:
    with open(input_json_file, encoding='utf-8') as f:
        results = json.load(f)
    output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split('/')[-1]
        output_md.append(F"""### Benchmark: {benchmark_file_name}""")
        title = '| metric |'
        lines = '|--------|'
        value = '| new / old (diff) |'
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['new']
            old_val = metric_vals.get('old', None)
            dif_val = metric_vals.get('diff', None)
            val_str = F""" {new_val:f}""" if isinstance(new_val, (int, float)) else 'None'
            if old_val is not None:
                val_str += F""" / {old_val:f}""" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += F""" ({dif_val:f})""" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('</details>')
    with open(output_md_file, 'w', encoding='utf-8') as f:
        f.writelines('\n'.join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 363 |
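# Shape of the benchmark JSON the converter above expects, plus a round trip
# through a temporary file (a sketch; the metric names are illustrative and
# the repaired `format_json_to_md` is assumed to be in scope).
import json
import tempfile

def format_json_to_md_demo() -> None:
    results = {"benchmarks/arrow_read": {"read_time": {"new": 1.23, "old": 1.50, "diff": -0.27}}}
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as src:
        json.dump(results, src)
    dst = src.name + ".md"
    format_json_to_md(src.name, dst)
    print(open(dst, encoding="utf-8").read())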
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    if is_torch_version('<', '2.0.0') or not hasattr(torch, '_dynamo'):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, 'forward')
        original_forward = model.__dict__.pop('_original_forward', None)
        if original_forward is not None:
            while hasattr(forward, '__wrapped__'):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, '_converted_to_transformer_engine', False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, '__qualname__') and not hasattr(obj, '__name__'):
        obj = getattr(obj, '__class__', obj)
    if hasattr(obj, '__qualname__'):
        return obj.__qualname__
    if hasattr(obj, '__name__'):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port=None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(('localhost', port)) == 0
| 269 | 0 |
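# Usage sketch for the `patch_environment` context manager above: keys are
# upper-cased, set for the duration of the block, and removed afterwards
# (assumes the variables are not already present in the environment).
import os

def patch_environment_demo() -> None:
    with patch_environment(master_addr="127.0.0.1", master_port=29501):
        assert os.environ["MASTER_ADDR"] == "127.0.0.1"
        assert os.environ["MASTER_PORT"] == "29501"
    assert "MASTER_ADDR" not in os.environ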
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""CLIPConfigMixin""",
"""DecisionTransformerConfigMixin""",
"""EncoderDecoderConfigMixin""",
"""RagConfigMixin""",
"""SpeechEncoderDecoderConfigMixin""",
"""VisionEncoderDecoderConfigMixin""",
"""VisionTextDualEncoderConfigMixin""",
}
def check_config_docstrings_have_checkpoints():
    '''simple docstring'''
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 165 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = """__DUMMY_TRANSFORMERS_USER__"""
CI_HUB_USER_FULL_NAME = """Dummy User"""
CI_HUB_USER_TOKEN = """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"""
CI_HUB_ENDPOINT = """https://hub-ci.huggingface.co"""
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
CI_HUB_TOKEN_PATH = Path("""~/.huggingface/hub_ci_token""").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE)


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("""datasets.config.HF_ENDPOINT""", CI_HUB_ENDPOINT)
    monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="""session""")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="""session""")
def hf_token():
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="""dataset""")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="""session""")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    # `text_file` is expected to be provided by another fixture module
    repo_name = F'''repo_txt_data-{int(time.time() * 10e3)}'''
    repo_id = F'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(repo_id, token=hf_token, repo_type="""dataset""", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(text_file), path_in_repo="""data/text_data.txt""", repo_id=repo_id, repo_type="""dataset""",)
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="""dataset""")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="""session""")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    # `zip_csv_with_dir_path` is expected to be provided by another fixture module
    repo_name = F'''repo_zipped_txt_data-{int(time.time() * 10e3)}'''
    repo_id = F'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(repo_id, token=hf_token, repo_type="""dataset""", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_csv_with_dir_path), path_in_repo="""data.zip""", repo_id=repo_id, repo_type="""dataset""",)
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="""dataset""")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="""session""")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    # `zip_image_path` is expected to be provided by another fixture module
    repo_name = F'''repo_zipped_img_data-{int(time.time() * 10e3)}'''
    repo_id = F'''{CI_HUB_USER}/{repo_name}'''
    hf_api.create_repo(repo_id, token=hf_token, repo_type="""dataset""", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_image_path), path_in_repo="""data.zip""", repo_id=repo_id, repo_type="""dataset""",)
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="""dataset""")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
| 265 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    """simple docstring"""
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        '''simple docstring'''
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)
        # test with a raw waveform
        audio = np.zeros((34_000,))
        audio2 = np.zeros((14_000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        '''simple docstring'''
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output, [
                {'score': ANY(float), 'label': ANY(str)},
                {'score': ANY(float), 'label': ANY(str)},
            ],)
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output, [
                {'score': ANY(float), 'label': ANY(str)},
            ],)
        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        '''simple docstring'''
        import datasets
        # test with a local file
        dataset = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        audio = dataset[0]['audio']['array']
        output = audio_classifier(audio)
        self.assertEqual(
            output, [
                {'score': ANY(float), 'label': ANY(str)},
                {'score': ANY(float), 'label': ANY(str)},
            ],)

    @require_torch
    def test_small_model_pt(self):
        '''simple docstring'''
        model = 'anton-l/wav2vec2-random-tiny-classifier'
        audio_classifier = pipeline('audio-classification', model=model)
        audio = np.ones((8_000,))
        output = audio_classifier(audio, top_k=4)
        EXPECTED_OUTPUT = [
            {'score': 0.0842, 'label': 'no'},
            {'score': 0.0838, 'label': 'up'},
            {'score': 0.0837, 'label': 'go'},
            {'score': 0.0834, 'label': 'right'},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {'score': 0.0845, 'label': 'stop'},
            {'score': 0.0844, 'label': 'on'},
            {'score': 0.0841, 'label': 'right'},
            {'score': 0.0834, 'label': 'left'},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
        audio_dict = {'array': np.ones((8_000,)), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        '''simple docstring'''
        import datasets
        model = 'superb/wav2vec2-base-superb-ks'
        audio_classifier = pipeline('audio-classification', model=model)
        dataset = datasets.load_dataset('anton-l/superb_dummy', 'ks', split='test')
        audio = np.array(dataset[3]['speech'], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3), [
                {'score': 0.981, 'label': 'go'},
                {'score': 0.007, 'label': 'up'},
                {'score': 0.006, 'label': '_unknown_'},
                {'score': 0.001, 'label': 'down'},
            ],)

    @require_tf
    @unittest.skip('Audio classification is not implemented for TF')
    def test_small_model_tf(self):
        '''simple docstring'''
        pass
| 254 |
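# Minimal stand-alone use of the audio-classification pipeline exercised by
# the tests above (checkpoint name taken from the small-model test; the
# waveform is synthetic silence).
import numpy as np
from transformers import pipeline

def audio_pipeline_demo() -> None:
    classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
    print(classifier(np.zeros(8000, dtype=np.float32), top_k=2))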
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open('w').write('\n'.join(new))
if __name__ == "__main__":
fire.Fire(minify)
| 254 | 1 |
import sys
from collections import defaultdict
class Heap:
    """simple docstring"""
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 62 |
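# Input sketch for the Prim's implementation above: an undirected weighted
# triangle as an adjacency list of [neighbor, weight] pairs (assumes the
# repaired `prisms_algorithm` from the previous snippet is in scope).
from collections import defaultdict

def prims_demo() -> None:
    adjacency_list = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        adjacency_list[u].append([v, w])
        adjacency_list[v].append([u, w])
    print(prisms_algorithm(adjacency_list))  # e.g. [(0, 1), (1, 2)]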
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}


class InstructBlipVisionConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "instructblip_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "instructblip_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs, ):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 62 | 1 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = None
lowerCAmelCase__ = BloomTokenizerFast
lowerCAmelCase__ = BloomTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = "tokenizer_file"
lowerCAmelCase__ = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
lowercase_ = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self , **UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.get_rust_tokenizer()
lowercase_ = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
lowercase_ = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
lowercase_ = tokenizer.batch_encode_plus(UpperCAmelCase )["input_ids"]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowercase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def A__ ( self , UpperCAmelCase=6 ) -> Any:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase_ = "This is a simple input"
lowercase_ = ["This is a simple input 1", "This is a simple input 2"]
lowercase_ = ("This is a simple input", "This is a pair")
lowercase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCAmelCase , max_length=UpperCAmelCase )
tokenizer_r.encode_plus(UpperCAmelCase , max_length=UpperCAmelCase )
tokenizer_r.batch_encode_plus(UpperCAmelCase , max_length=UpperCAmelCase )
tokenizer_r.encode(UpperCAmelCase , max_length=UpperCAmelCase )
tokenizer_r.batch_encode_plus(UpperCAmelCase , max_length=UpperCAmelCase )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
lowercase_ = None # Hotfixing padding = None
self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(
UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(
UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" , )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = self.get_rust_tokenizer()
lowercase_ = load_dataset("xnli" , "all_languages" , split="test" , streaming=UpperCAmelCase )
        lowercase_ = next(iter(UpperCAmelCase ) )["premise"] # pick one sample from the dataset
lowercase_ = list(sample_data.values() )
lowercase_ = list(map(tokenizer.encode , UpperCAmelCase ) )
lowercase_ = [tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase ) for x in output_tokens]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 297 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[32, 64, 128] , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2"] , UpperCAmelCase=[1, 2] , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = embed_dim
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = num_heads
lowercase_ = window_size
lowercase_ = mlp_ratio
lowercase_ = qkv_bias
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = drop_path_rate
lowercase_ = hidden_act
lowercase_ = use_absolute_embeddings
lowercase_ = patch_norm
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = is_training
lowercase_ = scope
lowercase_ = use_labels
lowercase_ = type_sequence_label_size
lowercase_ = encoder_stride
lowercase_ = out_features
lowercase_ = out_indices
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = FocalNetModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
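        # Each of the len(depths) - 1 stage transitions halves the patch grid in both
        # spatial dimensions (4x fewer tokens) and doubles the embedding width.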
lowercase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase_ = None
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = FocalNetForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForMaskedImageModeling(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 , has_text_modality=UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase_ = outputs.hidden_states
lowercase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# FocalNet has a different seq_length
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
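        # Token count of the first-stage patch grid: (height / patch) * (width / patch).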
lowercase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase_ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = reshaped_hidden_states[0].shape
lowercase_ = (
reshaped_hidden_states[0].view(UpperCAmelCase , UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
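        # Resolution the model pads inputs up to so both sides divide evenly by the patch size.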
lowercase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = FocalNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase_ = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(UpperCAmelCase )
lowercase_ = self.default_image_processor
lowercase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )
# verify the logits
lowercase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowercase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __lowerCamelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase__ = FocalNetConfig
lowerCAmelCase__ = False
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
| 297 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Any = "informer"
UpperCAmelCase__ : Optional[int] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , A_ = None , A_ = None , A_ = "student_t" , A_ = "nll" , A_ = 1 , A_ = None , A_ = "mean" , A_ = 0 , A_ = 0 , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 64 , A_ = 32 , A_ = 32 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = True , A_ = "gelu" , A_ = 0.05 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 100 , A_ = 0.02 , A_=True , A_ = "prob" , A_ = 5 , A_ = True , **A_ , ) -> str:
# time series specific configuration
__UpperCamelCase =prediction_length
__UpperCamelCase =context_length or prediction_length
__UpperCamelCase =distribution_output
__UpperCamelCase =loss
__UpperCamelCase =input_size
__UpperCamelCase =num_time_features
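        # Fall back to lag indices 1..7 when the caller provides none.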
__UpperCamelCase =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__UpperCamelCase =scaling
__UpperCamelCase =num_dynamic_real_features
__UpperCamelCase =num_static_real_features
__UpperCamelCase =num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__UpperCamelCase =cardinality
else:
__UpperCamelCase =[0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__UpperCamelCase =embedding_dimension
else:
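            # Common heuristic: embedding width grows with the category count, capped at 50.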
__UpperCamelCase =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__UpperCamelCase =num_parallel_samples
# Transformer architecture configuration
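        # Encoder/decoder input width: lagged target values plus the per-step feature count.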
__UpperCamelCase =input_size * len(self.lags_sequence ) + self._number_of_features
__UpperCamelCase =d_model
__UpperCamelCase =encoder_attention_heads
__UpperCamelCase =decoder_attention_heads
__UpperCamelCase =encoder_ffn_dim
__UpperCamelCase =decoder_ffn_dim
__UpperCamelCase =encoder_layers
__UpperCamelCase =decoder_layers
__UpperCamelCase =dropout
__UpperCamelCase =attention_dropout
__UpperCamelCase =activation_dropout
__UpperCamelCase =encoder_layerdrop
__UpperCamelCase =decoder_layerdrop
__UpperCamelCase =activation_function
__UpperCamelCase =init_std
__UpperCamelCase =use_cache
# Informer
__UpperCamelCase =attention_type
__UpperCamelCase =sampling_factor
__UpperCamelCase =distil
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def _a ( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 62 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def _a ( self , A_ , A_ = False ) -> float:
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _UpperCAmelCase ( dimension : int ):
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def _UpperCAmelCase ( dimension : int , pos : int ):
    assert isinstance(dimension , int ) and isinstance(pos , int )
    __UpperCamelCase =[0] * dimension
    __UpperCamelCase[pos] =1
    return Vector(__UpperCamelCase )
def _UpperCAmelCase ( scalar : float , x : Vector , y : Vector ):
    assert (
        isinstance(x , UpperCAmelCase__ )
        and isinstance(y , UpperCAmelCase__ )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ ) -> None:
__UpperCamelCase =matrix
__UpperCamelCase =w
__UpperCamelCase =h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
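        # The (x, y) minor is the determinant of the submatrix obtained by deleting
        # row x and column y.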
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
def _UpperCAmelCase ( n : int ):
    __UpperCamelCase =[[0] * n for _ in range(n )]
    return Matrix(__UpperCamelCase , n , n )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 62 | 1 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCAmelCase ( UpperCAmelCase_ ,UpperCAmelCase_ ):
'''simple docstring'''
@register_to_config
    def __init__( self , *, clip_extra_context_tokens = 4 , clip_embeddings_dim = 7_6_8 , time_embed_dim , cross_attention_dim , ) -> None:
super().__init__()
_UpperCAmelCase : Any = nn.Parameter(torch.zeros(__lowercase ) )
# parameters for additional clip time embeddings
_UpperCAmelCase : Optional[Any] = nn.Linear(__lowercase , __lowercase )
_UpperCAmelCase : int = nn.Linear(__lowercase , __lowercase )
# parameters for encoder hidden states
_UpperCAmelCase : Tuple = clip_extra_context_tokens
_UpperCAmelCase : int = nn.Linear(
__lowercase , self.clip_extra_context_tokens * cross_attention_dim )
_UpperCAmelCase : Union[str, Any] = nn.Linear(__lowercase , __lowercase )
_UpperCAmelCase : int = nn.LayerNorm(__lowercase )
def __lowerCAmelCase ( self , *, A , A , A , A ) -> Tuple:
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
_UpperCAmelCase : str = image_embeddings.shape[0]
_UpperCAmelCase : int = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
_UpperCAmelCase : Any = classifier_free_guidance_embeddings.expand(
__lowercase , -1 )
_UpperCAmelCase : List[Any] = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
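            # Batching [unconditional, conditional] embeddings lets a single forward
            # pass produce both branches needed for classifier-free guidance.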
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
_UpperCAmelCase : Tuple = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
_UpperCAmelCase : Dict = self.embedding_proj(__lowercase )
_UpperCAmelCase : Optional[Any] = self.clip_image_embeddings_project_to_time_embeddings(__lowercase )
_UpperCAmelCase : Any = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
_UpperCAmelCase : Tuple = self.clip_extra_context_tokens_proj(__lowercase )
_UpperCAmelCase : Optional[int] = clip_extra_context_tokens.reshape(__lowercase , -1 , self.clip_extra_context_tokens )
_UpperCAmelCase : Dict = clip_extra_context_tokens.permute(0 , 2 , 1 )
_UpperCAmelCase : Dict = self.encoder_hidden_states_proj(__lowercase )
_UpperCAmelCase : Union[str, Any] = self.text_encoder_hidden_states_norm(__lowercase )
_UpperCAmelCase : str = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 366 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self ) -> Dict:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
_UpperCAmelCase : List[str] = [[1, 2, 4], [1, 2, 3, 4]]
_UpperCAmelCase : List[str] = DisjunctiveConstraint(A )
self.assertTrue(isinstance(dc.token_ids , A ) )
with self.assertRaises(A ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(A ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __lowerCAmelCase ( self ) -> List[Any]:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
_UpperCAmelCase : Optional[int] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(A ):
DisjunctiveConstraint(A ) # fails here
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : Optional[Any] = [[1, 2, 3], [1, 2, 4]]
_UpperCAmelCase : Optional[int] = DisjunctiveConstraint(A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = dc.update(1 )
_UpperCAmelCase : Dict = stepped is True and completed is False and reset is False
self.assertTrue(A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = dc.update(2 )
_UpperCAmelCase : Any = stepped is True and completed is False and reset is False
self.assertTrue(A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = dc.update(3 )
_UpperCAmelCase : Optional[int] = stepped is True and completed is True and reset is False
self.assertTrue(A )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __lowerCAmelCase ( self ) -> Optional[int]:
_UpperCAmelCase : Tuple = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_UpperCAmelCase : Any = DisjunctiveConstraint(A )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 68 | 0 |
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode :
    data : float
    left : TreeNode | None = None
    right : TreeNode | None = None
def is_binary_search_tree (node : TreeNode | None ):
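    # Validate in two passes: node/data types first, then the BST ordering
    # invariant via shrinking (left_bound, right_bound) windows.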
    def is_valid_tree(node : TreeNode | None ) -> bool:
if node is None:
return True
        if not isinstance(node , TreeNode ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(node ):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''' )
def is_binary_search_tree_recursive_check(
        node : TreeNode | None , left_bound : float , right_bound : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
)
    return is_binary_search_tree_recursive_check(node , -float('''inf''' ) , float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 34 | '''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
_A : int = True
except (ImportError, ModuleNotFoundError):
_A : Optional[Any] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def UpperCamelCase_ ( snake_case_ : str ) -> str:
'''simple docstring'''
re.sub("""<n>""" , """""" , snake_case_ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(snake_case_ ) )
| 229 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: int ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __A ( UpperCamelCase__ , UpperCamelCase__ ):
a__ : Any = """swin"""
a__ : Any = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__(self : Any , __a : Optional[int]=224 , __a : Tuple=4 , __a : Optional[Any]=3 , __a : List[str]=96 , __a : List[str]=[2, 2, 6, 2] , __a : Union[str, Any]=[3, 6, 12, 24] , __a : int=7 , __a : Optional[Any]=4.0 , __a : Union[str, Any]=True , __a : Dict=0.0 , __a : Optional[int]=0.0 , __a : Optional[int]=0.1 , __a : Dict="gelu" , __a : int=False , __a : Optional[Any]=0.02 , __a : str=1E-5 , __a : str=32 , __a : Tuple=None , __a : List[Any]=None , **__a : List[str] , ):
super().__init__(**__a )
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = len(__a )
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_absolute_embeddings
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ = int(embed_dim * 2 ** (len(__a ) - 1) )
UpperCAmelCase_ = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(__a ) + 1 )]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
class __A ( UpperCamelCase__ ):
a__ : Tuple = version.parse("""1.11""" )
@property
def _lowercase (self : str ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _lowercase (self : Dict ):
return 1E-4
| 106 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@slow
def _lowercase (self : Dict ):
UpperCAmelCase_ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
UpperCAmelCase_ = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , ) # "J'aime le camembert !"
UpperCAmelCase_ = model(__a )["last_hidden_state"]
UpperCAmelCase_ = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __a )
# compare the actual values for a slice.
UpperCAmelCase_ = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 106 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 343 | def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> float:
'''simple docstring'''
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(UpperCamelCase_ ) * abs(UpperCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 343 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[str]:
def is_in_circle(__lowerCAmelCase , __lowerCAmelCase ) -> bool:
UpperCamelCase__ : List[str] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
UpperCamelCase__ : Dict = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_UpperCamelCase ) )
# The ratio of the area for circle to square is pi/4.
UpperCamelCase__ : List[str] = proportion * 4
print(f'The estimated value of pi is {pi_estimate}' )
    print(f'The reference value of pi is {pi}' )
print(f'The total error is {abs(pi - pi_estimate )}' )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 , ) -> float:
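    # Monte Carlo integration: average f over uniform samples, then scale the mean
    # by the interval width (max_value - min_value).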
return mean(
function_to_integrate(uniform(_UpperCamelCase , _UpperCamelCase ) ) for _ in range(_UpperCamelCase ) ) * (max_value - min_value)
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 ) -> None:
def identity_function(__lowerCAmelCase ) -> float:
return x
UpperCamelCase__ : Any = area_under_curve_estimator(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ : int = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(f'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(f'Estimated value is {estimated_value}' )
print(f'Expected value is {expected_value}' )
print(f'Total error is {abs(estimated_value - expected_value )}' )
print("******************" )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> None:
def function_to_integrate(__lowerCAmelCase ) -> float:
return sqrt(4.0 - x * x )
UpperCamelCase__ : Dict = area_under_curve_estimator(
_UpperCamelCase , _UpperCamelCase , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(f'Estimated value is {estimated_value}' )
print(f'Expected value is {pi}' )
print(f'Total error is {abs(estimated_value - pi )}' )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 359 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> bool:
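    # Recursive binary search over a sorted list; returns True iff the item is present.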
if len(__lowerCAmelCase ) == 0:
return False
UpperCamelCase__ : Any = len(__lowerCAmelCase ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , __lowerCAmelCase )
else:
return binary_search(a_list[midpoint + 1 :] , __lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase : Any =input('''Enter numbers separated by comma:\n''').strip()
lowerCamelCase : Dict =[int(item.strip()) for item in user_input.split(''',''')]
lowerCamelCase : List[str] =int(input('''Enter the number to be found in the list:\n''').strip())
lowerCamelCase : Union[str, Any] ='''''' if binary_search(sequence, target) else '''not '''
print(F"""{target} was {not_str}found in {sequence}""") | 196 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def __get__( self : str , snake_case_ : Union[str, Any] , snake_case_ : Tuple=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("""unreadable attribute""" )
snake_case__ : Optional[int] = """__cached_""" + self.fget.__name__
snake_case__ : int = getattr(snake_case_ , snake_case_ , snake_case_ )
if cached is None:
snake_case__ : Union[str, Any] = self.fget(snake_case_ )
setattr(snake_case_ , snake_case_ , snake_case_ )
return cached
def __snake_case( _lowerCAmelCase ) -> Tuple:
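    # Mirror of distutils.util.strtobool: map common truthy/falsy strings to 1/0.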
snake_case__ : Optional[int] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"invalid truth value {val!r}" )
def __snake_case( _lowerCAmelCase ) -> Any:
if is_torch_fx_proxy(_lowerCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(_lowerCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_lowerCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_lowerCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(_lowerCAmelCase , np.ndarray )
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
return isinstance(_lowerCAmelCase , np.ndarray )
def __snake_case( _lowerCAmelCase ) -> int:
return _is_numpy(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> List[str]:
import torch
return isinstance(_lowerCAmelCase , torch.Tensor )
def __snake_case( _lowerCAmelCase ) -> str:
return False if not is_torch_available() else _is_torch(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> List[str]:
import torch
return isinstance(_lowerCAmelCase , torch.device )
def __snake_case( _lowerCAmelCase ) -> List[Any]:
return False if not is_torch_available() else _is_torch_device(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> List[Any]:
import torch
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
if hasattr(_lowerCAmelCase , _lowerCAmelCase ):
snake_case__ : Union[str, Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
else:
return False
return isinstance(_lowerCAmelCase , torch.dtype )
def __snake_case( _lowerCAmelCase ) -> List[Any]:
return False if not is_torch_available() else _is_torch_dtype(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> List[str]:
import tensorflow as tf
return isinstance(_lowerCAmelCase , tf.Tensor )
def __snake_case( _lowerCAmelCase ) -> Any:
return False if not is_tf_available() else _is_tensorflow(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> Optional[int]:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(_lowerCAmelCase )
return type(_lowerCAmelCase ) == tf.Tensor
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> Dict:
import jax.numpy as jnp # noqa: F811
return isinstance(_lowerCAmelCase , jnp.ndarray )
def __snake_case( _lowerCAmelCase ) -> List[str]:
return False if not is_flax_available() else _is_jax(_lowerCAmelCase )
def __snake_case( _lowerCAmelCase ) -> Any:
if isinstance(_lowerCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(_lowerCAmelCase ) for k, v in obj.items()}
elif isinstance(_lowerCAmelCase , (list, tuple) ):
return [to_py_obj(_lowerCAmelCase ) for o in obj]
elif is_tf_tensor(_lowerCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(_lowerCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_lowerCAmelCase ):
return np.asarray(_lowerCAmelCase ).tolist()
elif isinstance(_lowerCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def __snake_case( _lowerCAmelCase ) -> List[Any]:
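    # Recursively convert nested dicts/lists of framework tensors into numpy arrays.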
if isinstance(_lowerCAmelCase , (dict, UserDict) ):
return {k: to_numpy(_lowerCAmelCase ) for k, v in obj.items()}
elif isinstance(_lowerCAmelCase , (list, tuple) ):
return np.array(_lowerCAmelCase )
elif is_tf_tensor(_lowerCAmelCase ):
return obj.numpy()
elif is_torch_tensor(_lowerCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_lowerCAmelCase ):
return np.asarray(_lowerCAmelCase )
else:
return obj
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ : List[str] = fields(self )
# Safety and consistency checks
if not len(snake_case_ ):
raise ValueError(f"{self.__class__.__name__} has no fields." )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f"{self.__class__.__name__} should not have more than one required field." )
snake_case__ : List[str] = getattr(self , class_fields[0].name )
snake_case__ : int = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(snake_case_ ):
if isinstance(snake_case_ , snake_case_ ):
snake_case__ : List[str] = first_field.items()
snake_case__ : Optional[int] = True
else:
try:
snake_case__ : Optional[Any] = iter(snake_case_ )
snake_case__ : Optional[int] = True
except TypeError:
snake_case__ : Dict = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(snake_case_ ):
if (
not isinstance(snake_case_ , (list, tuple) )
or not len(snake_case_ ) == 2
or not isinstance(element[0] , snake_case_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
snake_case__ : int = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f"Cannot set key/value for {element}. It needs to be a tuple (key, value)." )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
snake_case__ : Union[str, Any] = element[1]
elif first_field is not None:
snake_case__ : List[str] = first_field
else:
for field in class_fields:
snake_case__ : List[Any] = getattr(self , field.name )
if v is not None:
snake_case__ : Dict = v
def __delitem__( self : Union[str, Any] , *snake_case_ : Tuple , **snake_case_ : int ):
raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." )
def lowerCamelCase ( self : List[str] , *snake_case_ : List[Any] , **snake_case_ : Optional[Any] ):
raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." )
def lowerCamelCase ( self : Dict , *snake_case_ : List[Any] , **snake_case_ : str ):
raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance." )
def lowerCamelCase ( self : List[str] , *snake_case_ : Tuple , **snake_case_ : Tuple ):
raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance." )
def __getitem__( self : Tuple , snake_case_ : Optional[Any] ):
if isinstance(snake_case_ , snake_case_ ):
snake_case__ : int = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : List[str] , snake_case_ : Tuple , snake_case_ : Union[str, Any] ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(snake_case_ , snake_case_ )
super().__setattr__(snake_case_ , snake_case_ )
def __setitem__( self : str , snake_case_ : Union[str, Any] , snake_case_ : int ):
# Will raise a KeyException if needed
super().__setitem__(snake_case_ , snake_case_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(snake_case_ , snake_case_ )
def lowerCamelCase ( self : int ):
return tuple(self[k] for k in self.keys() )
class UpperCAmelCase_ ( _a , _a ):
"""simple docstring"""
@classmethod
def lowerCamelCase ( cls : int , snake_case_ : List[Any] ):
raise ValueError(
f"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}" )
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = "longest"
lowercase = "max_length"
lowercase = "do_not_pad"
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
lowercase = "pt"
lowercase = "tf"
lowercase = "np"
lowercase = "jax"
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : int , snake_case_ : List[ContextManager] ):
snake_case__ : Any = context_managers
snake_case__ : int = ExitStack()
def __enter__( self : Union[str, Any] ):
for context_manager in self.context_managers:
self.stack.enter_context(snake_case_ )
def __exit__( self : Optional[int] , *snake_case_ : List[Any] , **snake_case_ : Any ):
self.stack.__exit__(*snake_case_ , **snake_case_ )
def can_return_loss( model_class ) -> bool:
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels( model_class ) -> List[str]:
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict( d , parent_key = "" , delimiter = "." ) -> Dict:
    # `MutableMapping` is assumed imported from `collections.abc` at the top of this module
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
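# Example (illustrative): flatten_dict({"a": {"b": 1}, "c": 2}) == {"a.b": 1, "c": 2}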
@contextmanager
def working_or_temp_dir( working_dir , use_temp_dir = False ):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose( array , axes=None ):
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f"Type not supported for transpose: {type(array )}." )
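# The helpers below follow the same dispatch pattern as `transpose` above: probe
# which framework the array belongs to (numpy / torch / tf / jax) and forward to
# that framework's native primitive, so callers stay framework-agnostic.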
def reshape( array , newshape ):
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f"Type not supported for reshape: {type(array )}." )
def squeeze( array , axis=None ):
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array )}." )
def expand_dims( array , axis ):
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array )}." )
def tensor_size( array ):
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array )}." )
def add_model_info_to_auto_map( auto_map , repo_id ):
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
def infer_framework( model_class ):
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("""torch""" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}." )
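# Illustrative check (hypothetical import): for a PyTorch model such as
# `transformers.BertModel`, `infer_framework(BertModel)` walks the MRO until it
# hits `PreTrainedModel` and returns "pt".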
| 35 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """deit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
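# timm stores attention as one fused `qkv` projection of shape (3 * hidden_size, hidden_size);
# the three row slices taken above map, in order, to the query, key and value
# projections of the converted HuggingFace attention layer.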
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name , pytorch_dump_folder_path ):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("""tiny""" ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("""small""" ):
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("""base""" ):
        pass
    elif deit_name[4:].startswith("""large""" ):
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 35 | 1 |
def bubble_sort( list_data : list , length : int = 0 ):
    """simple docstring"""
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            # swap adjacent out-of-order elements
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
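# Example (illustrative): bubble_sort([5, 1, 4, 2]) -> [1, 2, 4, 5]; the
# recursion stops as soon as a full pass over the list makes no swap.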
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 |
def solution( length : int = 5_0 ):
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
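# For each admissible (block_length, block_start) pair the loop adds the
# arrangements of the untouched prefix of length
# row_length - block_start - block_length - 1, and the trailing `+= 1` counts
# the placement where the block ends flush with the row (a start position the
# `range` above excludes).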
if __name__ == "__main__":
print(f'''{solution() = }''')
| 315 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy( out , labels ):
    """simple docstring"""
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset( dataset_path ):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path , encoding='utf_8' ) as f:
        f = csv.reader(f )
        output = []
        next(f ) # skip the first line
        for line in tqdm(f ):
            output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets( encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    """simple docstring"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, cont_a, cont_b, mc_label),
        ) in enumerate(dataset ):
            with_cont_a = [start_token] + story[:cap_length] + [delimiter_token] + cont_a[:cap_length] + [clf_token]
            with_cont_b = [start_token] + story[:cap_length] + [delimiter_token] + cont_b[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont_a )] = with_cont_a
            input_ids[i, 1, : len(with_cont_b )] = with_cont_b
            mc_token_ids[i, 0] = len(with_cont_a ) - 1
            mc_token_ids[i, 1] = len(with_cont_b ) - 1
            lm_labels[i, 0, : len(with_cont_a )] = with_cont_a
            lm_labels[i, 1, : len(with_cont_b )] = with_cont_b
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
def main( ):
    """simple docstring"""
    parser = argparse.ArgumentParser()
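# Layout note: every story yields two candidate inputs, hence the extra
# "alternative" axis of size 2 — input_ids[i, a, :] holds
# [start] story [delim] continuation_a [clf], mc_token_ids[i, a] points at the
# [clf] position consumed by the multiple-choice head, and mc_labels[i] stores
# which alternative is correct.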
parser.add_argument('--model_name' , type=_snake_case , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=_snake_case , type=_snake_case , required=_snake_case , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=_snake_case , default='' )
parser.add_argument('--eval_dataset' , type=_snake_case , default='' )
parser.add_argument('--seed' , type=_snake_case , default=42 )
parser.add_argument('--num_train_epochs' , type=_snake_case , default=3 )
parser.add_argument('--train_batch_size' , type=_snake_case , default=8 )
parser.add_argument('--eval_batch_size' , type=_snake_case , default=16 )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=_snake_case , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=_snake_case , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=_snake_case , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=_snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=_snake_case , default=6.2_5e-5 )
parser.add_argument('--warmup_steps' , default=0 , type=_snake_case , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=_snake_case , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=_snake_case , default=0.01 )
parser.add_argument('--lm_coef' , type=_snake_case , default=0.9 )
parser.add_argument('--n_valid' , type=_snake_case , default=374 )
parser.add_argument('--server_ip' , type=_snake_case , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=_snake_case , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device , n_gpu ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name )
    tokenizer.add_tokens(special_tokens )
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens )
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
    model.resize_token_embeddings(len(tokenizer ) )
    model.to(device )
    # Load and encode the datasets
    def tokenize_and_encode(obj ):
        # Tokenize and encode a nested object (string, int, or iterable of either)
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info('Encoding dataset...' )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(cont_a[:max_length] ) , len(cont_b[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, cont_a, cont_b, _ in dataset )
    input_length = min(input_length , model.config.n_positions ) # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids )
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size )
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc='Training' )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss , scheduler.get_lr()[0] )
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model , 'module' ) else model # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME )
        torch.save(model_to_save.state_dict() , output_model_file )
        model_to_save.config.to_json_file(output_config_file )
        tokenizer.save_vocabulary(args.output_dir )
        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(device )
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader , desc='Evaluating' ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu' ).numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels )
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0 )
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
        output_eval_file = os.path.join(args.output_dir , 'eval_results.txt' )
        with open(output_eval_file , 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key in sorted(result.keys() ):
                logger.info(' %s = %s' , key , str(result[key] ) )
                writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 218 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def strabool( v ):
    """simple docstring"""
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected' )
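# Typical use is as an argparse `type=` callback, e.g.
#     parser.add_argument("--class_cond", type=strabool)
# so that a string like "false" parses to the boolean False.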
def convert_resnet( checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
    """simple docstring"""
    new_checkpoint[F'{new_prefix}.norm1.weight'] = checkpoint[F'{old_prefix}.in_layers.0.weight']
    new_checkpoint[F'{new_prefix}.norm1.bias'] = checkpoint[F'{old_prefix}.in_layers.0.bias']
    new_checkpoint[F'{new_prefix}.conv1.weight'] = checkpoint[F'{old_prefix}.in_layers.2.weight']
    new_checkpoint[F'{new_prefix}.conv1.bias'] = checkpoint[F'{old_prefix}.in_layers.2.bias']
    new_checkpoint[F'{new_prefix}.time_emb_proj.weight'] = checkpoint[F'{old_prefix}.emb_layers.1.weight']
    new_checkpoint[F'{new_prefix}.time_emb_proj.bias'] = checkpoint[F'{old_prefix}.emb_layers.1.bias']
    new_checkpoint[F'{new_prefix}.norm2.weight'] = checkpoint[F'{old_prefix}.out_layers.0.weight']
    new_checkpoint[F'{new_prefix}.norm2.bias'] = checkpoint[F'{old_prefix}.out_layers.0.bias']
    new_checkpoint[F'{new_prefix}.conv2.weight'] = checkpoint[F'{old_prefix}.out_layers.3.weight']
    new_checkpoint[F'{new_prefix}.conv2.bias'] = checkpoint[F'{old_prefix}.out_layers.3.bias']
    if has_skip:
        new_checkpoint[F'{new_prefix}.conv_shortcut.weight'] = checkpoint[F'{old_prefix}.skip_connection.weight']
        new_checkpoint[F'{new_prefix}.conv_shortcut.bias'] = checkpoint[F'{old_prefix}.skip_connection.bias']
    return new_checkpoint
def convert_attention( checkpoint , new_checkpoint , old_prefix , new_prefix , attention_dim=None ):
    """simple docstring"""
    weight_q, weight_k, weight_v = checkpoint[F'{old_prefix}.qkv.weight'].chunk(3 , dim=0 )
    bias_q, bias_k, bias_v = checkpoint[F'{old_prefix}.qkv.bias'].chunk(3 , dim=0 )
    new_checkpoint[F'{new_prefix}.group_norm.weight'] = checkpoint[F'{old_prefix}.norm.weight']
    new_checkpoint[F'{new_prefix}.group_norm.bias'] = checkpoint[F'{old_prefix}.norm.bias']
    new_checkpoint[F'{new_prefix}.to_q.weight'] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_q.bias'] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_k.weight'] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_k.bias'] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_v.weight'] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_v.bias'] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'{new_prefix}.to_out.0.weight'] = (
        checkpoint[F'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[F'{new_prefix}.to_out.0.bias'] = checkpoint[F'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
def con_pt_to_diffuser( checkpoint_path , unet_config ):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'down_blocks.{i}.resnets.{j}'
                old_prefix = F'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'down_blocks.{i}.resnets.{j}'
                old_prefix = F'input_blocks.{current_layer}.0'
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                new_prefix = F'down_blocks.{i}.attentions.{j}'
                old_prefix = F'input_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
        if i != len(down_block_types ) - 1:
            new_prefix = F'down_blocks.{i}.downsamplers.0'
            old_prefix = F'input_blocks.{current_layer}.0'
            new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    current_layer = 0
    up_block_types = unet_config['up_block_types']
    for i, layer_type in enumerate(up_block_types ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'up_blocks.{i}.resnets.{j}'
                old_prefix = F'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'up_blocks.{i}.upsamplers.0'
                old_prefix = F'output_blocks.{current_layer-1}.1'
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'up_blocks.{i}.resnets.{j}'
                old_prefix = F'output_blocks.{current_layer}.0'
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                new_prefix = F'up_blocks.{i}.attentions.{j}'
                old_prefix = F'output_blocks.{current_layer}.1'
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'up_blocks.{i}.upsamplers.0'
                old_prefix = F'output_blocks.{current_layer-1}.2'
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    return new_checkpoint
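# Indexing note: `current_layer` walks the flat `input_blocks.N` /
# `output_blocks.N` lists of the original U-Net checkpoint, while the diffusers
# layout is nested per block (`down_blocks.{i}.resnets.{j}`), which is why the
# counter is threaded through every conversion call above.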
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
_lowerCAmelCase : Tuple = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCAmelCase : Optional[int] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_lowerCAmelCase : int = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Tuple = con_pt_to_diffuser(args.unet_path, unet_config)
_lowerCAmelCase : Optional[int] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_lowerCAmelCase : int = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_lowerCAmelCase : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCAmelCase : List[str] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
_lowerCAmelCase : Any = CMStochasticIterativeScheduler(**scheduler_config)
_lowerCAmelCase : str = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 218 | 1 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _A :
    @property
    def dummy_input( self ):
        '''simple docstring'''
        return self.get_dummy_input()
    @property
    def output_shape( self ):
        '''simple docstring'''
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
    def get_dummy_input( self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {"""hidden_states""": hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input["""temb"""] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
        if include_res_hidden_states_tuple:
            generator_a = torch.manual_seed(1 )
            dummy_input["""res_hidden_states_tuple"""] = (randn_tensor(shape , generator=generator_a , device=device ),)
        if include_encoder_hidden_states:
            dummy_input["""encoder_hidden_states"""] = floats_tensor((batch_size, 32, 32) ).to(device )
        if include_skip_sample:
            dummy_input["""skip_sample"""] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
        return dummy_input
    def prepare_init_args_and_inputs_for_common( self ):
        '''simple docstring'''
        init_dict = {
            """in_channels""": 32,
            """out_channels""": 32,
            """temb_channels""": 128,
        }
        if self.block_type == "up":
            init_dict["""prev_output_channel"""] = 32
        if self.block_type == "mid":
            init_dict.pop("""out_channels""" )
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output( self , expected_slice ):
        '''simple docstring'''
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict )
        if isinstance(output , tuple ):
            output = output[0]
        self.assertEqual(output.shape , self.output_shape )
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5E-3 )
@unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" )
    def test_training( self ):
        '''simple docstring'''
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )
        if isinstance(output , tuple ):
            output = output[0]
        device = torch.device(torch_device )
        noise = randn_tensor(output.shape , device=device )
        loss = torch.nn.functional.mse_loss(output , noise )
        loss.backward()
| 371 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor( LayoutLMvaImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use LayoutLMv2ImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 16 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''mgp-str''': 2_7}
class MgpstrTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , unk_token="[GO]" , bos_token="[GO]" , eos_token="[s]" , pad_token="[GO]" , **kwargs ):
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.vocab = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size( self ):
        return len(self.vocab )
    def get_vocab( self ):
        return dict(self.vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        char_tokens = []
        for s in text:
            char_tokens.extend(s )
        return char_tokens
    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )
    def save_vocabulary( self , save_directory , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory ) )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        return (vocab_file,)
| 155 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__( self , parent , d_model=1_6 , batch_size=1_3 , prediction_length=7 , context_length=1_4 , label_length=1_0 , cardinality=1_9 , embedding_dimension=5 , num_time_features=4 , is_training=True , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , lags_sequence=[1, 2, 3, 4, 5] , moving_average=2_5 , autocorrelation_factor=5 , ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict( self , config ):
        _past_length = config.context_length + max(config.lags_sequence )
        static_categorical_features = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        past_values = floats_tensor([self.batch_size, _past_length] )
        past_observed_mask = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        future_values = floats_tensor([self.batch_size, config.prediction_length] )
        inputs_dict = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
    def prepare_config_and_inputs( self ):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ):
        model = AutoformerModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = AutoformerEncoder.from_pretrained(tmpdirname ).to(torch_device )
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict )
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = AutoformerDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_a = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class AutoformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp( self ):
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_save_load_strict( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model_a, info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info["missing_keys"] , [] )
    def test_encoder_decoder_model_standalone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
    @unittest.skip(reason="Model has no tokens embeddings" )
    def test_resize_tokens_embeddings( self ):
        pass
    def test_model_main_input_name( self ):
        model_signature = inspect.signature(getattr(AutoformerModel , "forward" ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
    def test_attention_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_length = getattr(self.model_tester , "seq_length" , None )
        decoder_seq_length = getattr(self.model_tester , "decoder_seq_length" , seq_length )
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , seq_length )
        d_model = getattr(self.model_tester , "d_model" , None )
        num_attention_heads = getattr(self.model_tester , "num_attention_heads" , None )
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            out_len = len(outputs )
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len , correct_outlen )
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions , (list, tuple) )
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions , (list, tuple) )
            self.assertEqual(len(cross_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + 2 , len(outputs ) )
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
    def test_retain_grad_hidden_states_attentions( self ):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch( filename="train-batch.pt" ):
    '''simple docstring'''
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=filename , repo_type="dataset" )
    batch = torch.load(file , map_location=torch_device )
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests( unittest.TestCase ):
    def test_inference_no_head( self ):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_head( self ):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
        batch = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_seq_to_seq_generation( self ):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(torch_device )
        batch = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , expected_shape )
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device )
        mean_prediction = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1e-1 ) )
| 22 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __SCREAMING_SNAKE_CASE(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
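
# Illustrative usage (not part of the original module): the tool downloads its
# checkpoints on first use, so this is left as a commented sketch.
#
#     tool = __SCREAMING_SNAKE_CASE()
#     tool.setup()
#     inputs = tool.encode("Hello world")
#     waveform = tool.decode(tool.forward(inputs))  # 1-D float tensor, 16 kHz audio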
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
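
# Illustrative variant (not in the original): preparing |1> with an X gate before
# measuring should concentrate the 1000 shots on the "1" outcome.
def single_qubit_measure_one(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    circuit.x(0)  # flip qubit 0 from |0> to |1>
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)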
if __name__ == "__main__":
print(f'Total count for various states are: {single_qubit_measure(1, 1)}')
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
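
# Quick illustration (not part of the original tests): `lines=True` emits one JSON
# object per line (JSON Lines), while `lines=False, orient="records"` emits a single
# JSON array; the two helpers above parse exactly these two shapes.
def _json_lines_vs_records_example():
    jsonl_buffer = io.BytesIO(b'{"a": 1}\n{"a": 2}\n')
    assert load_json_lines(jsonl_buffer) == [{"a": 1}, {"a": 2}]
    records_buffer = io.BytesIO(b'[{"a": 1}, {"a": 2}]')
    assert load_json(records_buffer) == [{"a": 1}, {"a": 2}]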
class UpperCAmelCase__ :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
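
# Illustrative round trip (not part of the original suite): write a small in-memory
# Dataset to JSON Lines and parse it back with plain json. Uses only names imported above.
def _json_roundtrip_example():
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    with io.BytesIO() as buffer:
        JsonDatasetWriter(ds, buffer, lines=True).write()
        buffer.seek(0)
        rows = [json.loads(line) for line in buffer]
    assert rows == [{"col_1": "a", "col_2": 1}, {"col_1": "b", "col_2": 2}]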
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPTaTokenizer, GPTaTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPTaTokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPTaTokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)
@require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)
@require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizera = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizera, tokenizer.__class__)
        self.assertEqual(tokenizera.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast because there is no converter from slow to fast for our new
            # tokenizer and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
@require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
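
# Minimal sketch (not part of the original tests) of the registration API exercised
# above: pair a custom config with a custom slow tokenizer, then undo it, mirroring
# the try/finally blocks in the tests.
def _registration_example():
    try:
        AutoConfig.register("custom", CustomConfig)
        AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
        assert TOKENIZER_MAPPING[CustomConfig] == (CustomTokenizer, None)
    finally:
        if "custom" in CONFIG_MAPPING._extra_content:
            del CONFIG_MAPPING._extra_content["custom"]
        if CustomConfig in TOKENIZER_MAPPING._extra_content:
            del TOKENIZER_MAPPING._extra_content[CustomConfig]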
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
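
# Sketch (not part of the original file): building the kind of dummy batch the model
# tester above feeds to the models, using helpers already imported at the top.
def _dummy_batch(batch_size=2, seq_length=5, vocab_size=99):
    input_ids = ids_tensor([batch_size, seq_length], vocab_size)
    attention_mask = random_attention_mask([batch_size, seq_length])
    return {"input_ids": input_ids, "attention_mask": attention_mask}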
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """simple docstring"""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    """simple docstring"""
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
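
# Worked example (not in the original): the rolling hash drops the leading character
# and appends the next one in O(1). Rolling the hash of "ab" to "bc" must equal
# hashing "bc" directly (with alphabet_size=256 and a length-2 window, the power is 256).
def _rolling_hash_demo() -> None:
    h_ab = (ord("a") * alphabet_size + ord("b")) % modulus
    rolled = ((h_ab - ord("a") * alphabet_size) * alphabet_size + ord("c")) % modulus
    assert rolled == (ord("b") * alphabet_size + ord("c")) % modulus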
if __name__ == "__main__":
test_rabin_karp()
def gray_code_sequence(bit_count: int) -> list:
    '''simple docstring'''
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    '''simple docstring'''
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
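
# Worked example (not in the original): for 2 bits the reflected Gray code visits
# 00, 01, 11, 10 — successive values differ in exactly one bit.
def _gray_code_demo() -> None:
    assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
    assert gray_code_sequence(2) == [0, 1, 3, 2]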
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.intaa)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]

            return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.intaa) == tf_outputs_values))
@slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
            loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
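
# Sketch (not part of the original tests): the in-graph tokenizer can also be traced
# once with an explicit signature and reused, mirroring test_graph_mode above. Left
# as a comment because it downloads a checkpoint.
#
#     tok = TFGPTaTokenizer.from_pretrained("gpt2")
#     traced = tf.function(tok, input_signature=(tf.TensorSpec((None,), tf.string),))
#     ids = traced(tf.constant(["hello world"]))["input_ids"]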
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
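
# Note (not part of the original file): with the _LazyModule pattern above, importing
# the package is cheap and heavy backends load only on first attribute access, e.g.:
#
#     from transformers.models.xglm import XGLMModel  # torch is imported here, lazily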
'''simple docstring'''
def perfect(number: int) -> bool:
    """simple docstring"""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
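
# Worked check (not in the original): the first perfect numbers are 6, 28, 496, 8128.
def _perfect_demo() -> None:
    assert [n for n in range(2, 10_000) if perfect(n)] == [6, 28, 496, 8128]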
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image.Image) -> Image.Image:
    '''simple docstring'''
    width, height = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[i, j]
            mean += pixel
    mean //= width * height

    for i in range(width):
        for j in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
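
# Synthetic check (not in the original): thresholding a two-tone grayscale image
# should leave only 0 and 255 pixels (the mean of the 4x4 image below is 105).
def _threshold_demo() -> None:
    img = Image.new("L", (4, 4))
    img.putdata([10] * 8 + [200] * 8)
    out = mean_threshold(img)
    assert set(out.getdata()) <= {0, 255}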
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowerCAmelCase__ = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )
def snake_case ( self : str ):
logger.info("initializing retrieval" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
lowercase__ : List[str] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
lowercase__ , lowercase__ : Union[str, Any] = ray.get(random_worker.retrieve.remote(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
else:
lowercase__ , lowercase__ : List[str] = self._main_retrieve(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(SCREAMING_SNAKE_CASE )
@classmethod
def snake_case ( cls : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any=None , **SCREAMING_SNAKE_CASE : Dict ):
return super(SCREAMING_SNAKE_CASE , cls ).get_tokenizers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@classmethod
def snake_case ( cls : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int=None , **SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Tuple = kwargs.pop("config" , SCREAMING_SNAKE_CASE ) or RagConfig.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : Dict = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = rag_tokenizer.question_encoder
lowercase__ : List[str] = rag_tokenizer.generator
if indexed_dataset is not None:
lowercase__ : Tuple = "custom"
lowercase__ : Optional[int] = CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE )
else:
lowercase__ : Optional[Any] = cls._build_index(SCREAMING_SNAKE_CASE )
return cls(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=SCREAMING_SNAKE_CASE , generator_tokenizer=SCREAMING_SNAKE_CASE , retrieval_workers=SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE , )
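# --- Usage sketch (added for illustration; assumes a running Ray cluster and a
# placeholder checkpoint name) ---
#   import ray
#   ray.init()
#   RemoteRetriever = ray.remote(RayRetriever)
#   workers = [RemoteRetriever.remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()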
| 121 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet keys to the original Stable Diffusion layout."""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to original SD conv weights
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE keys to the original Stable Diffusion layout."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    """Convert a v2.x (OpenCLIP) text encoder, fusing separate q/k/v projections."""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    # v1.x CLIP text encoders need no key renaming
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights using safetensors; default is ckpt."
    )
    args = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
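# --- Illustrative note (added) ---
# For a complete Diffusers UNet state dict, convert_unet_state_dict renames e.g.
# "time_embedding.linear_1.weight" -> "time_embed.0.weight" (see
# unet_conversion_map), and the __main__ block then prefixes every key with
# "model.diffusion_model." for the combined original-format checkpoint.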
| 121 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
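# Behavior note (added, illustrative): replacing sys.modules[__name__] with a
# _LazyModule means `from ...beit import BeitModel` resolves the name through
# _import_structure, importing modeling_beit (and torch) only on first access.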
| 106 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Difference between the predicted and the actual output for an example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)
def _hypothesis_value(data_input_tuple):
    """Hypothesis h(x) = theta_0 + sum_i theta_i * x_i for one input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Actual output value for the given example from the chosen data set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the given example from the chosen data set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum of the cost-derivative terms; index == -1 addresses the bias term."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
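# Illustrative check (added): with the initial parameter_vector [2, 4, 1, 5],
# h((5, 2, 3)) = 2 + 4*5 + 1*2 + 5*3 = 39.
assert _hypothesis_value((5, 2, 3)) == 39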
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 166 | 0 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__(self):
        return len(self.file_names)
    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
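# Launch sketch (added; the script filename is a placeholder):
#   accelerate launch complete_cv_example.py --data_dir /path/to/images --with_tracking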
| 361 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 83 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
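# Usage sketch (added, illustrative):
#   onnx_config = CamembertOnnxConfig(CamembertConfig(), task="default")
#   assert list(onnx_config.inputs.keys()) == ["input_ids", "attention_mask"]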
| 134 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list):
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]
    def __init__(self, dataclass_types, **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser, field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )
        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]
        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]
            kwargs["type"] = make_choice_type_function(kwargs["choices"])
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)
                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: dict, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
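# --- Usage sketch (added, illustrative) ---
#   from dataclasses import dataclass, field
#   @dataclass
#   class RunArgs:
#       learning_rate: float = field(default=3e-5, metadata={"help": "Initial LR."})
#       do_eval: bool = False
#   parser = HfArgumentParser(RunArgs)
#   (run_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4"])
#   assert run_args.learning_rate == 1e-4 and run_args.do_eval is False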
| 366 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( a ):
lowercase__ : Dict = (UniPCMultistepScheduler,)
lowercase__ : Optional[int] = (("""num_inference_steps""", 25),)
def __snake_case( self : List[str] , **_UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**_UpperCamelCase )
return config
def __snake_case( self : List[str] , _UpperCamelCase : Dict=0 , **_UpperCamelCase : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_UpperCamelCase )
new_scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = sample, sample
for t in range(_UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case( self : Any , _UpperCamelCase : Union[str, Any]=0 , **_UpperCamelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case( self : List[str] , _UpperCamelCase : Tuple=None , **_UpperCamelCase : List[Any] ) -> str:
'''simple docstring'''
if scheduler is None:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = 10
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).prev_sample
return sample
def __snake_case( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , _UpperCamelCase )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(_UpperCamelCase )
elif num_inference_steps is not None and not hasattr(_UpperCamelCase , "set_timesteps" ):
SCREAMING_SNAKE_CASE = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
SCREAMING_SNAKE_CASE = scheduler.timesteps[5]
SCREAMING_SNAKE_CASE = scheduler.timesteps[6]
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __snake_case( self : str ) -> Any:
'''simple docstring'''
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def __snake_case( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def __snake_case( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , solver_order=order , solver_type=solver_type , )
def __snake_case( self : Tuple ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
def __snake_case( self : Dict ) -> int:
'''simple docstring'''
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , prediction_type=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = self.full_loop(
solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , prediction_type=_UpperCamelCase , )
assert not torch.isnan(_UpperCamelCase ).any(), "Samples have nan numbers"
def __snake_case( self : List[str] ) -> Tuple:
'''simple docstring'''
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def __snake_case( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def __snake_case( self : Tuple ) -> Dict:
'''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def __snake_case( self : List[str] ) -> List[Any]:
'''simple docstring'''
        sample = self.full_loop(prediction_type="v_prediction" )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1_0_1_4 ) < 1e-3
def __snake_case( self : int ) -> Any:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
def __snake_case( self : List[str] , **_UpperCamelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**_UpperCamelCase )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 206 | 0 |
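# A minimal sketch of the scheduler-interchange pattern the test file above
# exercises (real diffusers classes; the config value is illustrative):
# multistep schedulers share a config schema, so each can be rebuilt from
# another's config via `from_config`.
from diffusers import DEISMultistepScheduler, UniPCMultistepScheduler

uni_pc = UniPCMultistepScheduler(num_train_timesteps=1_000)
deis = DEISMultistepScheduler.from_config(uni_pc.config)  # same hyperparameters
round_trip = UniPCMultistepScheduler.from_config(deis.config)
assert round_trip.config.num_train_timesteps == 1_000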
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[int] = FlaxAutoencoderKL
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = 4
SCREAMING_SNAKE_CASE_: List[str] = 3
SCREAMING_SNAKE_CASE_: str = (32, 32)
SCREAMING_SNAKE_CASE_: Dict = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE_: List[str] = jax.random.uniform(lowerCAmelCase__ , ((batch_size, num_channels) + sizes))
return {"sample": image, "prng_key": prng_key}
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: Dict = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
SCREAMING_SNAKE_CASE_: Any = self.dummy_input
return init_dict, inputs_dict
| 13 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 13 | 1 |
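# The guarded-import pattern above, reduced to its core (a sketch; the
# probed package is arbitrary): detect an optional dependency up front so
# that importing the package itself never fails outright.
import importlib.util

_torch_available = importlib.util.find_spec("torch") is not None
if _torch_available:
    import torch  # noqa: F401
else:
    torch = None  # callers must check _torch_available before touching torch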
import flax.linen as nn
import jax
import jax.numpy as jnp
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: Tuple ) -> Union[str, Any]:
__UpperCAmelCase : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , hidden_states ):
        batch , height , width , channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
        hidden_states = self.conv(hidden_states )
        return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> Any:
__UpperCAmelCase : Optional[int] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self: Dict , __lowerCamelCase: str ) -> List[Any]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__UpperCAmelCase : Any = self.conv(__lowerCamelCase )
return hidden_states
class _snake_case ( nn.Module ):
lowerCamelCase__: int
lowerCamelCase__: int = None
lowerCamelCase__: float = 0.0
lowerCamelCase__: bool = None
lowerCamelCase__: jnp.dtype = jnp.floataa
def _lowerCamelCase ( self: str ) -> List[str]:
__UpperCAmelCase : str = self.in_channels if self.out_channels is None else self.out_channels
__UpperCAmelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : List[str] = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[Any] = nn.Dense(__lowerCamelCase , dtype=self.dtype )
__UpperCAmelCase : Any = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__UpperCAmelCase : Optional[Any] = nn.Dropout(self.dropout_prob )
__UpperCAmelCase : Tuple = nn.Conv(
__lowerCamelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__UpperCAmelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__UpperCAmelCase : List[Any] = None
if use_nin_shortcut:
__UpperCAmelCase : Dict = nn.Conv(
__lowerCamelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self: Tuple , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any]=True ) -> List[Any]:
__UpperCAmelCase : Dict = hidden_states
__UpperCAmelCase : int = self.norma(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = nn.swish(__lowerCamelCase )
__UpperCAmelCase : Tuple = self.conva(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = self.time_emb_proj(nn.swish(__lowerCamelCase ) )
__UpperCAmelCase : List[str] = jnp.expand_dims(jnp.expand_dims(__lowerCamelCase , 1 ) , 1 )
__UpperCAmelCase : List[str] = hidden_states + temb
__UpperCAmelCase : Union[str, Any] = self.norma(__lowerCamelCase )
__UpperCAmelCase : Tuple = nn.swish(__lowerCamelCase )
__UpperCAmelCase : str = self.dropout(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[str] = self.conva(__lowerCamelCase )
if self.conv_shortcut is not None:
__UpperCAmelCase : Optional[int] = self.conv_shortcut(__lowerCamelCase )
return hidden_states + residual
| 355 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_snake_case = pytest.mark.integration
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: Union[str, Any] ) -> str:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset
def _lowerCamelCase ( self: Optional[Any] ) -> Tuple:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
__UpperCAmelCase : int = dset.map(
lambda __lowerCamelCase , __lowerCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__lowerCamelCase , keep_in_memory=__lowerCamelCase )
__UpperCAmelCase : Tuple = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
__UpperCAmelCase , __UpperCAmelCase : Dict = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
dset.drop_index("vecs" )
def _lowerCamelCase ( self: List[str] ) -> int:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__UpperCAmelCase , __UpperCAmelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
import faiss
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
dset.save_faiss_index("vecs" , tmp_file.name )
dset.load_faiss_index("vecs2" , tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase , __UpperCAmelCase : List[Any] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(__lowerCamelCase , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
from elasticsearch import Elasticsearch
__UpperCAmelCase : Dataset = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : int = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
__UpperCAmelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
__UpperCAmelCase : Any = Elasticsearch()
dset.add_elasticsearch_index("filename" , es_client=__lowerCamelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = dset.get_nearest_examples("filename" , "my_name-train_29" )
self.assertEqual(examples["filename"][0] , "my_name-train_29" )
@require_faiss
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: List[str] ) -> Optional[int]:
import faiss
__UpperCAmelCase : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__UpperCAmelCase : Dict = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__UpperCAmelCase : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__UpperCAmelCase , __UpperCAmelCase : Any = index.search_batch(__lowerCamelCase )
self.assertRaises(__lowerCamelCase , index.search_batch , queries[0] )
__UpperCAmelCase : Dict = [scores[0] for scores in total_scores]
__UpperCAmelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __lowerCamelCase )
def _lowerCamelCase ( self: Any ) -> List[str]:
import faiss
__UpperCAmelCase : Dict = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__UpperCAmelCase : Optional[Any] = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__lowerCamelCase ):
__UpperCAmelCase : Any = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )
def _lowerCamelCase ( self: List[str] ) -> Dict:
import faiss
__UpperCAmelCase : str = faiss.IndexFlat(5 )
__UpperCAmelCase : int = FaissIndex(custom_index=__lowerCamelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def _lowerCamelCase ( self: Union[str, Any] ) -> int:
import faiss
__UpperCAmelCase : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__lowerCamelCase ) as tmp_file:
index.save(tmp_file.name )
__UpperCAmelCase : List[str] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__UpperCAmelCase : Tuple = np.zeros(5 , dtype=np.floataa )
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search(__lowerCamelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _UpperCamelCase ( snake_case__ ) -> Optional[Any]:
import faiss
__UpperCAmelCase : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
__UpperCAmelCase : Optional[Any] = "index.faiss"
__UpperCAmelCase : Optional[int] = f'''mock://{index_name}'''
index.save(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : Dict = FaissIndex.load(snake_case__, storage_options=mockfs.storage_options )
__UpperCAmelCase : str = np.zeros(5, dtype=np.floataa )
__UpperCAmelCase : Any = 1
__UpperCAmelCase , __UpperCAmelCase : List[str] = index.search(snake_case__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _snake_case ( _lowercase ):
def _lowerCamelCase ( self: str ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
__UpperCAmelCase : Optional[Any] = Elasticsearch()
__UpperCAmelCase : Dict = {"acknowledged": True}
__UpperCAmelCase : Any = ElasticSearchIndex(es_client=__lowerCamelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["foo", "bar", "foobar"] )
# single query
__UpperCAmelCase : Dict = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = index.search(__lowerCamelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__UpperCAmelCase : int = "foo"
__UpperCAmelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search(__lowerCamelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__UpperCAmelCase : int = ["foo", "bar", "foobar"]
__UpperCAmelCase : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : List[Any] = index.search_batch(__lowerCamelCase )
__UpperCAmelCase : Tuple = [scores[0] for scores in total_scores]
__UpperCAmelCase : Optional[int] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
# batched queries with timeout
__UpperCAmelCase : str = ["foo", "bar", "foobar"]
__UpperCAmelCase : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = index.search_batch(__lowerCamelCase , request_timeout=30 )
__UpperCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
__UpperCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__lowerCamelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __lowerCamelCase )
| 342 | 0 |
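# The delete=False / os.unlink dance from the save-and-load tests above,
# isolated into a standalone sketch: on Windows a NamedTemporaryFile opened
# with delete=True cannot be reliably reopened by name, so the tests create
# it with delete=False and remove it by hand once every handle is closed.
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
    tmp_file.write(b"payload")
    tmp_name = tmp_file.name
with open(tmp_name, "rb") as f:  # reopening by name is now safe everywhere
    assert f.read() == b"payload"
os.unlink(tmp_name)  # manual cleanup replaces delete=True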
"""simple docstring"""
def triangle_number_generator():
    for n in range(1 , 1_00_00_00 ):
        yield n * (n + 1) // 2
def count_divisors( n ):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 5_00 )
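# Sanity check against the worked example in the original Project Euler
# statement: 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors and is the
# first triangle number with more than five of them.
assert count_divisors(28) == 6
assert next(t for t in triangle_number_generator() if count_divisors(t) > 5) == 28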
if __name__ == "__main__":
    print(solution())
| 191 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : str = '''beit'''
def __init__( self ,SCREAMING_SNAKE_CASE__=81_92 ,SCREAMING_SNAKE_CASE__=7_68 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=30_72 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=1E-12 ,SCREAMING_SNAKE_CASE__=2_24 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=[3, 5, 7, 11] ,SCREAMING_SNAKE_CASE__=[1, 2, 3, 6] ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=0.4 ,SCREAMING_SNAKE_CASE__=2_56 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=2_55 ,**SCREAMING_SNAKE_CASE__ ,) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = vocab_size
__SCREAMING_SNAKE_CASE :Dict = hidden_size
__SCREAMING_SNAKE_CASE :Union[str, Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE :List[str] = num_attention_heads
__SCREAMING_SNAKE_CASE :Optional[Any] = intermediate_size
__SCREAMING_SNAKE_CASE :Optional[int] = hidden_act
__SCREAMING_SNAKE_CASE :Tuple = hidden_dropout_prob
__SCREAMING_SNAKE_CASE :Optional[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE :Optional[Any] = initializer_range
__SCREAMING_SNAKE_CASE :str = layer_norm_eps
__SCREAMING_SNAKE_CASE :int = image_size
__SCREAMING_SNAKE_CASE :Tuple = patch_size
__SCREAMING_SNAKE_CASE :Any = num_channels
__SCREAMING_SNAKE_CASE :Any = use_mask_token
__SCREAMING_SNAKE_CASE :Union[str, Any] = use_absolute_position_embeddings
__SCREAMING_SNAKE_CASE :Union[str, Any] = use_relative_position_bias
__SCREAMING_SNAKE_CASE :Union[str, Any] = use_shared_relative_position_bias
__SCREAMING_SNAKE_CASE :List[str] = layer_scale_init_value
__SCREAMING_SNAKE_CASE :Optional[Any] = drop_path_rate
__SCREAMING_SNAKE_CASE :str = use_mean_pooling
# decode head attributes (semantic segmentation)
__SCREAMING_SNAKE_CASE :Dict = out_indices
__SCREAMING_SNAKE_CASE :Optional[int] = pool_scales
# auxiliary head attributes (semantic segmentation)
__SCREAMING_SNAKE_CASE :Optional[int] = use_auxiliary_head
__SCREAMING_SNAKE_CASE :Union[str, Any] = auxiliary_loss_weight
__SCREAMING_SNAKE_CASE :Dict = auxiliary_channels
__SCREAMING_SNAKE_CASE :Optional[int] = auxiliary_num_convs
__SCREAMING_SNAKE_CASE :List[str] = auxiliary_concat_input
__SCREAMING_SNAKE_CASE :List[Any] = semantic_loss_ignore_index
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : List[Any] = version.parse('''1.11''' )
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _UpperCamelCase ( self ) -> float:
"""simple docstring"""
        return 1E-4
| 191 | 1 |
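# A usage sketch for the configuration above (`BeitConfig` is the real
# transformers class this snippet mirrors; the keyword values here are
# illustrative): segmentation-head fields such as `out_indices` sit next to
# the backbone hyperparameters in the same config object.
from transformers import BeitConfig

beit_config = BeitConfig(image_size=384, out_indices=[3, 5, 7, 11])
assert beit_config.patch_size == 16  # library default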
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_lowerCAmelCase )
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
UpperCAmelCase : ClassVar[Features] = Features({"audio": Audio()} )
UpperCAmelCase : ClassVar[Features] = Features({"labels": ClassLabel} )
UpperCAmelCase : str = "audio"
UpperCAmelCase : str = "labels"
def __snake_case ( self , A_ ) -> Dict:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , A_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
lowerCAmelCase = copy.deepcopy(self )
lowerCAmelCase = self.label_schema.copy()
lowerCAmelCase = features[self.label_column]
lowerCAmelCase = label_schema
return task_template
@property
def __snake_case ( self ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
        }
| 361 |
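# Sketch of how the task template above is applied (datasets API as of the
# version this file targets; the feature names are illustrative):
# `align_with_features` verifies the label column is a ClassLabel and copies
# its class names into the template's label schema.
from datasets import Audio, ClassLabel, Features
from datasets.tasks import AudioClassification

features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
template = AudioClassification(audio_column="audio", label_column="labels")
aligned = template.align_with_features(features)
assert aligned.label_schema["labels"].names == ["dog", "cat"]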
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __snake_case( unittest.TestCase ):
'''simple docstring'''
@slow
def __snake_case ( self ) -> List[str]:
lowerCAmelCase = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
lowerCAmelCase = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
model.to(A_ )
from datasets import load_dataset
lowerCAmelCase = load_dataset("""nielsr/rvlcdip-demo""" )
lowerCAmelCase = dataset["""train"""][0]["""image"""].convert("""RGB""" )
lowerCAmelCase = image_processor(A_ , return_tensors="""pt""" ).to(A_ )
# forward pass
with torch.no_grad():
lowerCAmelCase = model(**A_ )
lowerCAmelCase = outputs.logits
lowerCAmelCase = torch.Size((1, 16) )
self.assertEqual(logits.shape , A_ )
lowerCAmelCase = torch.tensor(
[-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=A_ , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
| 187 | 0 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
),
}
class _snake_case ( a__ ):
snake_case__ = "xlm-prophetnet"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self : str , UpperCAmelCase : Optional[float] = 0.1 , UpperCAmelCase : Optional[Union[str, Callable]] = "gelu" , UpperCAmelCase : Optional[int] = 30522 , UpperCAmelCase : Optional[int] = 1024 , UpperCAmelCase : Optional[int] = 4096 , UpperCAmelCase : Optional[int] = 12 , UpperCAmelCase : Optional[int] = 16 , UpperCAmelCase : Optional[int] = 4096 , UpperCAmelCase : Optional[int] = 12 , UpperCAmelCase : Optional[int] = 16 , UpperCAmelCase : Optional[float] = 0.1 , UpperCAmelCase : Optional[float] = 0.1 , UpperCAmelCase : Optional[int] = 512 , UpperCAmelCase : Optional[float] = 0.0_2 , UpperCAmelCase : Optional[bool] = True , UpperCAmelCase : Optional[bool] = True , UpperCAmelCase : Optional[int] = 0 , UpperCAmelCase : Optional[int] = 2 , UpperCAmelCase : Optional[int] = 32 , UpperCAmelCase : Optional[int] = 128 , UpperCAmelCase : Optional[bool] = False , UpperCAmelCase : Optional[float] = 0.0 , UpperCAmelCase : Optional[bool] = True , UpperCAmelCase : Optional[int] = 0 , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : Optional[int] = 2 , **UpperCAmelCase : List[str] , ):
__lowerCamelCase : str = vocab_size
__lowerCamelCase : str = hidden_size
__lowerCamelCase : List[Any] = encoder_ffn_dim
__lowerCamelCase : List[str] = num_encoder_layers
__lowerCamelCase : Dict = num_encoder_attention_heads
__lowerCamelCase : List[Any] = decoder_ffn_dim
__lowerCamelCase : int = num_decoder_layers
__lowerCamelCase : Optional[int] = num_decoder_attention_heads
__lowerCamelCase : Optional[Any] = max_position_embeddings
__lowerCamelCase : Optional[Any] = init_std # Normal(0, this parameter)
__lowerCamelCase : Any = activation_function
# parameters for xlmprophetnet
__lowerCamelCase : List[str] = ngram
__lowerCamelCase : Union[str, Any] = num_buckets
__lowerCamelCase : Any = relative_max_distance
__lowerCamelCase : Optional[int] = disable_ngram_loss
__lowerCamelCase : List[Any] = eps
# 3 Types of Dropout
__lowerCamelCase : List[Any] = attention_dropout
__lowerCamelCase : Optional[int] = activation_dropout
__lowerCamelCase : Optional[int] = dropout
__lowerCamelCase : Union[str, Any] = use_cache
super().__init__(
pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , add_cross_attention=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , **UpperCAmelCase , )
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def lowerCamelCase__ ( self : int , UpperCAmelCase : Any ):
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`." ) | 135 | """simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 135 | 1 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def lowerCamelCase (a_ :Tuple , a_ :Dict) -> Dict:
lowercase :Tuple = tmp_path_factory.mktemp('''dset_infos_dir''')
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''') as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''')
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''') as f:
f.write('''''')
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''') as f:
f.write('''{"default": {"dataset_size": 42}}''')
lowercase :Union[str, Any] = DatasetInfosDict.from_directory(a_)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''')}) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def lowerCamelCase (a_ :Dict , a_ :DatasetInfo) -> Tuple:
lowercase :Optional[int] = str(a_)
dataset_info.write_to_directory(a_)
lowercase :Optional[Any] = DatasetInfo.from_directory(a_)
assert dataset_info == reloaded
assert os.path.exists(os.path.join(a_ , '''dataset_info.json'''))
def lowerCamelCase () -> Optional[Any]:
lowercase :Union[str, Any] = DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''')}) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
lowercase :Dict = dataset_info._to_yaml_dict()
assert sorted(a_) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str))
lowercase :List[str] = yaml.safe_dump(a_)
lowercase :List[str] = yaml.safe_load(a_)
assert dataset_info_yaml_dict == reloaded
def lowerCamelCase () -> List[Any]:
lowercase :Tuple = DatasetInfo()
lowercase :Any = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()}),
DatasetInfosDict({'''my_config_name''': DatasetInfo()}),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''')}) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
}),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42),
'''v2''': DatasetInfo(dataset_size=1337),
}),
] , )
def lowerCamelCase (a_ :str , a_ :DatasetInfosDict) -> int:
lowercase :Optional[int] = str(a_)
dataset_infos_dict.write_to_directory(a_)
lowercase :List[Any] = DatasetInfosDict.from_directory(a_)
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowercase :Union[str, Any] = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowercase :int = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(a_ , '''README.md'''))
| 172 |
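# Minimal illustration of the YAML round-trip the tests above cover (private
# datasets helpers used exactly as in those tests; field values arbitrary).
# As the tests note, fields like `description` are not part of the YAML
# representation and are dropped on the way back.
from datasets import DatasetInfo

info = DatasetInfo(description="demo", dataset_size=42)
restored = DatasetInfo._from_yaml_dict(info._to_yaml_dict())
assert restored.dataset_size == 42
assert restored.description != "demo"  # description is not serialized to YAML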
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCamelCase (a_ :int) -> int: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCamelCase () -> Optional[int]:
with parallel_backend('''spark'''):
assert ParallelBackendConfig.backend_name == "spark"
lowercase :Optional[int] = [1, 2, 3]
with pytest.raises(a_):
with parallel_backend('''unsupported backend'''):
map_nested(a_ , a_ , num_proc=2)
with pytest.raises(a_):
with parallel_backend('''unsupported backend'''):
map_nested(a_ , a_ , num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1])
def lowerCamelCase (a_ :Union[str, Any]) -> Optional[Any]:
lowercase :Optional[Any] = [1, 2]
lowercase :int = {'''a''': 1, '''b''': 2}
lowercase :List[Any] = {'''a''': [1, 2], '''b''': [3, 4]}
lowercase :Optional[int] = {'''a''': {'''1''': 1}, '''b''': 2}
lowercase :List[Any] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
lowercase :Optional[int] = [2, 3]
lowercase :Tuple = {'''a''': 2, '''b''': 3}
lowercase :Union[str, Any] = {'''a''': [2, 3], '''b''': [4, 5]}
lowercase :List[str] = {'''a''': {'''1''': 2}, '''b''': 3}
lowercase :Union[str, Any] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark'''):
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
assert map_nested(a_ , a_ , num_proc=a_) == expected_map_nested_sa
| 172 | 1 |
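# The joblib dispatch the tests above exercise, in its smallest form (a
# sketch: "spark" needs the joblibspark package, so a stock joblib backend
# name is used here instead).
from datasets.parallel import parallel_backend
from datasets.utils.py_utils import map_nested

def add_one(i):  # top-level, hence picklable
    return i + 1

with parallel_backend("threading"):
    assert map_nested(add_one, [1, 2, 3], num_proc=2) == [2, 3, 4]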
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __UpperCamelCase ( a__ ):
lowerCamelCase : Tuple =""""""
lowerCamelCase : Union[str, Any] ="""hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[int]:
super().__init__(self , **lowerCAmelCase__ )
a : Any = repo_info
a : str = token
a : Any = None
def __a ( self ) -> str:
if self.dir_cache is None:
a : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
a : Optional[int] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(lowerCAmelCase__ ): {"name": str(lowerCAmelCase__ ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = "rb" , **lowerCAmelCase__ , ) -> Tuple:
if not isinstance(self.repo_info , lowerCAmelCase__ ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
a : Union[str, Any] = hf_hub_url(self.repo_info.id , lowerCAmelCase__ , revision=self.repo_info.sha )
return fsspec.open(
lowerCAmelCase__ , mode=lowerCAmelCase__ , headers=get_authentication_headers_for_url(lowerCAmelCase__ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def __a ( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
self._get_dirs()
a : Dict = self._strip_protocol(lowerCAmelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> List[Any]:
self._get_dirs()
a : Union[str, Any] = PurePosixPath(path.strip("/" ) )
a : Optional[Any] = {}
for p, f in self.dir_cache.items():
a : Union[str, Any] = PurePosixPath(p.strip("/" ) )
a : Tuple = p.parent
if root == path:
a : Optional[Any] = f
a : int = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
| 105 |
'''simple docstring'''
def combination_sum_iv( n , array , target ):
    def count_of_possible_combinations( target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n , array , target ):
    def count_of_possible_combinations_with_dp_array(
        target , dp_array ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n , array , target ):
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
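# Worked example shared by all three implementations above: with
# array = [1, 2, 5] and target = 5 there are 9 ordered combinations
# (order matters), e.g. 1+1+1+1+1, 1+2+2, 2+2+1, 5, ...
assert combination_sum_iv(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9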
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 298 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case = AltDiffusionPipeline
__snake_case = TEXT_TO_IMAGE_PARAMS
__snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
__snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
__snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
A__ : List[Any] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A__ : Union[str, Any] =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , )
torch.manual_seed(0 )
A__ : Dict =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
A__ : Optional[int] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
A__ : Tuple =CLIPTextModel(lowerCAmelCase_ )
A__ : Dict =XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ : List[str] =77
A__ : Tuple ={
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int]=0 ) -> Optional[int]:
'''simple docstring'''
if str(lowerCAmelCase_ ).startswith("""mps""" ):
A__ : Optional[Any] =torch.manual_seed(lowerCAmelCase_ )
else:
A__ : Any =torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
A__ : int ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
A__ : str ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Union[str, Any] =self.get_dummy_components()
torch.manual_seed(0 )
A__ : Any =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
A__ : int =RobertaSeriesModelWithTransformation(lowerCAmelCase_ )
A__ : int =text_encoder
A__ : Dict =AltDiffusionPipeline(**lowerCAmelCase_ )
A__ : Dict =alt_pipe.to(lowerCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : List[str] =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : Dict ="""A photo of an astronaut"""
A__ : Dict =alt_pipe(**lowerCAmelCase_ )
A__ : List[Any] =output.images
A__ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ : Union[str, Any] =np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
A__ : int ="""cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Any =self.get_dummy_components()
A__ : str =PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
torch.manual_seed(0 )
A__ : Any =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
A__ : List[Any] =RobertaSeriesModelWithTransformation(lowerCAmelCase_ )
A__ : List[Any] =text_encoder
A__ : int =AltDiffusionPipeline(**lowerCAmelCase_ )
A__ : Union[str, Any] =alt_pipe.to(lowerCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Dict =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : List[Any] =alt_pipe(**lowerCAmelCase_ )
A__ : Dict =output.images
A__ : str =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ : Optional[int] =np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
# make sure here that pndm scheduler skips prk
A__ : List[str] =AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=lowerCAmelCase_ )
A__ : List[str] =alt_pipe.to(lowerCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Union[str, Any] ="""A painting of a squirrel eating a burger"""
A__ : Optional[int] =torch.manual_seed(0 )
A__ : Optional[Any] =alt_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" )
A__ : Tuple =output.images
A__ : Union[str, Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A__ : Tuple =np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
A__ : Any =DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" )
A__ : List[str] =AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
A__ : int =alt_pipe.to(lowerCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Optional[int] ="""A painting of a squirrel eating a burger"""
A__ : List[Any] =torch.manual_seed(0 )
A__ : List[Any] =alt_pipe([prompt] , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""numpy""" )
A__ : List[Any] =output.images
A__ : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A__ : List[str] =np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 136 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
__snake_case : Optional[Any] = None
__snake_case : Optional[Any] = {
'7B': 1_1008,
'13B': 1_3824,
'30B': 1_7920,
'65B': 2_2016,
'70B': 2_8672,
}
__snake_case : Union[str, Any] = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size( n, ffn_dim_multiplier=1, multiple_of=256 ) -> int:
    """simple docstring"""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def read_json( path ) -> dict:
    """simple docstring"""
    with open(path, """r""" ) as f:
        return json.load(f )
def write_json( text, path ) -> None:
    """simple docstring"""
    with open(path, """w""" ) as f:
        json.dump(text, f )
def write_model( model_path, input_base_path, model_size, safe_serialization=True ) -> Any:
"""simple docstring"""
os.makedirs(__snake_case, exist_ok=__snake_case )
A__ : List[Any] =os.path.join(__snake_case, """tmp""" )
os.makedirs(__snake_case, exist_ok=__snake_case )
A__ : Dict =read_json(os.path.join(__snake_case, """params.json""" ) )
A__ : Dict =NUM_SHARDS[model_size]
A__ : List[str] =params["""n_layers"""]
A__ : int =params["""n_heads"""]
A__ : str =n_heads // num_shards
A__ : Tuple =params["""dim"""]
A__ : Union[str, Any] =dim // n_heads
A__ : str =1_00_00.0
A__ : Any =1.0 / (base ** (torch.arange(0, __snake_case, 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
A__ : Optional[Any] =params["""n_kv_heads"""] # for GQA / MQA
A__ : int =n_heads_per_shard // num_key_value_heads
A__ : int =dim // num_key_value_heads
else: # compatibility with other checkpoints
A__ : List[Any] =n_heads
A__ : List[str] =n_heads_per_shard
A__ : Dict =dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim ):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2 ).transpose(1, 2 ).reshape(dim1, dim2 )
print(f"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
A__ : List[str] =torch.load(os.path.join(__snake_case, """consolidated.00.pth""" ), map_location="""cpu""" )
else:
# Sharded
A__ : Optional[Any] =[
torch.load(os.path.join(__snake_case, f"consolidated.{i:02d}.pth" ), map_location="""cpu""" )
for i in range(__snake_case )
]
A__ : Optional[Any] =0
A__ : str ={"""weight_map""": {}}
for layer_i in range(__snake_case ):
A__ : Dict =f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
A__ : Dict ={
f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wq.weight"] ),
f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wk.weight"] ),
f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
A__ : Any ={
f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
f"layers.{layer_i}.attention_norm.weight"
].clone(),
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
f"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
A__ : Optional[Any] =permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(__snake_case, __snake_case, __snake_case )
for i in range(__snake_case )
], dim=0, ).reshape(__snake_case, __snake_case ) )
A__ : int =permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
__snake_case, __snake_case, __snake_case )
for i in range(__snake_case )
], dim=0, ).reshape(__snake_case, __snake_case ), __snake_case, __snake_case, __snake_case, )
A__ : int =torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
__snake_case, __snake_case, __snake_case )
for i in range(__snake_case )
], dim=0, ).reshape(__snake_case, __snake_case )
A__ : List[str] =torch.cat(
[loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(__snake_case )], dim=1 )
A__ : Optional[int] =torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(__snake_case )], dim=0 )
A__ : str =torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(__snake_case )], dim=1 )
A__ : List[str] =torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(__snake_case )], dim=0 )
A__ : List[Any] =inv_freq
for k, v in state_dict.items():
A__ : Optional[Any] =filename
param_count += v.numel()
torch.save(__snake_case, os.path.join(__snake_case, __snake_case ) )
A__ : Tuple =f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
A__ : Tuple ={
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
A__ : Any ={
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(__snake_case )], dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(__snake_case )], dim=0 ),
}
for k, v in state_dict.items():
A__ : int =filename
param_count += v.numel()
torch.save(__snake_case, os.path.join(__snake_case, __snake_case ) )
# Write configs
A__ : Union[str, Any] ={"""total_size""": param_count * 2}
write_json(__snake_case, os.path.join(__snake_case, """pytorch_model.bin.index.json""" ) )
A__ : Optional[Any] =params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
A__ : List[Any] =params["""multiple_of"""] if """multiple_of""" in params else 256
A__ : int =LlamaConfig(
hidden_size=__snake_case, intermediate_size=compute_intermediate_size(__snake_case, __snake_case, __snake_case ), num_attention_heads=params["""n_heads"""], num_hidden_layers=params["""n_layers"""], rms_norm_eps=params["""norm_eps"""], num_key_value_heads=__snake_case, )
config.save_pretrained(__snake_case )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
A__ : List[Any] =LlamaForCausalLM.from_pretrained(__snake_case, torch_dtype=torch.floataa, low_cpu_mem_usage=__snake_case )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(__snake_case, safe_serialization=__snake_case )
shutil.rmtree(__snake_case )
def write_tokenizer( tokenizer_path, input_tokenizer_path ) -> None:
    """simple docstring"""
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}." )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main( ) -> Any:
"""simple docstring"""
A__ : List[str] =argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""", help="""Location of LLaMA weights, which contains tokenizer.model and model folders""", )
parser.add_argument(
"""--model_size""", choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""], )
parser.add_argument(
"""--output_dir""", help="""Location to write HF model and tokenizer""", )
parser.add_argument("""--safe_serialization""", type=__snake_case, help="""Whether or not to save using `safetensors`.""" )
A__ : Any =parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size ), model_size=args.model_size, safe_serialization=args.safe_serialization, )
A__ : List[Any] =os.path.join(args.input_dir, """tokenizer.model""" )
write_tokenizer(args.output_dir, __snake_case )
if __name__ == "__main__":
main()
| 136 | 1 |
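# Worked check of the FFN sizing rule from the conversion script above (a
# standalone sketch, not part of the script): for the 7B model, dim = 4096
# with ffn_dim_multiplier = 1 and multiple_of = 256 gives
# int(1 * int(8 * 4096 / 3)) = 10922, rounded up to the next multiple of
# 256 -> 11008, matching the '7B': 11008 entry in the table at the top.
def _intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)

assert _intermediate_size(4096) == 11008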
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
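# Example usage of the in-place slowsort above:
if __name__ == "__main__":
    data = [5, 3, 8, 1, 9, 2]
    slowsort(data)
    print(data)  # [1, 2, 3, 5, 8, 9]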
| 196 |
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Returns the index of the first term in the Fibonacci sequence to contain n digits."""
    return fibonacci_digits_index(n)
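# Sanity check for the helpers above: the first Fibonacci number with three digits is
# F(12) = 144, so the index returned for n=3 should be 12.
assert fibonacci_digits_index(3) == 12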
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 196 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model which is supposed to download a comet model
    # mock load_from_checkpoint which is supposed to load the downloaded model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
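# A hypothetical example of the patcher registry above: any metric whose doctest would
# otherwise hit the network or a heavy model can register a context manager under its
# name. The metric name and patched target below are made up for illustration.
@LocalMetricTest.register_intensive_calls_patcher("my_metric")
def patch_my_metric(module_name):
    with patch("my_metric_backend.expensive_forward") as mock_forward:
        mock_forward.return_value = 0.5
        yield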
| 42 |
from __future__ import annotations
def ceil_index(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
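# Example, using the functions above: the longest strictly increasing subsequence of
# [2, 5, 3, 7, 11, 8, 10, 13, 6] is 2, 3, 7, 8, 10, 13, so the length is 6.
assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6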
| 42 | 1 |
import re
def dna_complement(dna: str) -> str:
    """Return the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
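# Example: each base maps to its Watson-Crick complement.
assert dna_complement("ATCG") == "TAGC"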
| 253 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
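# Worked example of the derived channel dimension above: with the default embed_dim=96
# and four stages (depths=[2, 2, 6, 2]), the final stage has 96 * 2 ** (4 - 1) = 768
# channels, which becomes `hidden_size`.
assert 96 * 2 ** (len([2, 2, 6, 2]) - 1) == 768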
| 253 | 1 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports batched input: a list of dicts with "image" and "question" keys
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 367 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a config (or a derived class) from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
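# Quick illustration of the attribute aliasing declared above: `attribute_map` makes
# `config.hidden_size` resolve to `config.d_model`, so with the defaults both are 256.
# config = DetrConfig()
# assert config.hidden_size == config.d_model == 256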
| 342 | 0 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(
        self,
        vae,
        text_encoder,
        tokenizer,
        unet,
        low_res_scheduler,
        scheduler,
        max_noise_level=350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
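# Worked example of the tile-grid arithmetic in `__call__` above: a 512x512 input with
# tile_size=128 is processed as a ceil(512/128) x ceil(512/128) = 4 x 4 grid of 16 tiles,
# and the upscaled canvas is 4x larger in each dimension (2048x2048).
assert math.ceil(512 / 128) * math.ceil(512 / 128) == 16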
| 329 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """
    Wraps a Speech2Text feature extractor and tokenizer into a single processor.
    """

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
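# A hedged usage sketch (the checkpoint name and waveform are illustrative): audio goes
# through the feature extractor, text through the tokenizer, and `text=` yields `labels`.
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# inputs = processor(audio=waveform, sampling_rate=16_000, text="a transcript")
# inputs["input_features"], inputs["labels"]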
| 129 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50_267,
        entity_vocab_size=500_000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 368 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
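# Non-interactive sketch of the same flow as main() above, with the input() prompts
# replaced by hard-coded values (3 variables, minterms 1, 5 and 7):
def example() -> None:
    binary = decimal_to_binary(3, [1, 5, 7])
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential = selection(chart, prime_implicants)
    print("Prime Implicants:", prime_implicants)
    print("Essential Prime Implicants:", essential)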
| 330 | 0 |
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
    def test_tokenizer_integration(self):
        sequences = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
__magic_name__ = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__magic_name__, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences
        )  # `__magic_name__` is the expected-encoding dict defined above
| 88 | """simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID : %i" % params.node_id)
    logger.info(PREFIX + "Local rank : %i" % params.local_rank)
    logger.info(PREFIX + "World size : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
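# Worked example of the rank arithmetic above: with WORLD_SIZE=16 and N_GPU_NODE=8
# there are 16 // 8 = 2 nodes, and global rank 11 lives on node 11 // 8 = 1.
assert 16 // 8 == 2 and 11 // 8 == 1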
| 261 | 0 |
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 362 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding="max_length", return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
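# A hedged usage sketch of the tool above; the image path is illustrative.
# from PIL import Image
# segmenter = ImageSegmentationTool()
# mask = segmenter(image=Image.open("cat.png"), label="cat")
# mask.save("cat_mask.png")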
| 39 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 227 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
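# Minimal sketch: the mask construction above only needs an `aatype` tensor. For a
# 5-residue input, the dense mapping has shape (5, 14) and the sparse one (5, 37).
if __name__ == "__main__":
    demo = {"aatype": torch.zeros(5, dtype=torch.long)}
    demo = make_atom14_masks(demo)
    assert demo["residx_atom14_to_atom37"].shape == (5, 14)
    assert demo["residx_atom37_to_atom14"].shape == (5, 37)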
| 207 | 0 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(pt_model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )
    pt_model.base_model_prefix = ""
    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
    if len(missing_keys) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
' use it for predictions and inference.' )
return pt_model
| 249 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor ( DeformableDetrImageProcessor ):
"""simple docstring"""
def __init__( self : Optional[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use DeformableDetrImageProcessor instead.' , FutureWarning , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 249 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
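# enable_full_determinism() forces deterministic torch kernels so that the
# pixel-level slice assertions in the tests below are reproducible across runs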
class _snake_case ( A__ , A__ , A__ , unittest.TestCase ):
_lowercase : int = StableUnCLIPImgaImgPipeline
_lowercase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowercase : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowercase : Union[str, Any] = frozenset([] )
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE = CLIPImageProcessor(crop_size=32 , size=32)
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=a , projection_dim=a , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ))
# regular denoising components
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = StableUnCLIPImageNormalizer(embedding_dim=a)
SCREAMING_SNAKE_CASE = DDPMScheduler(beta_schedule='squaredcos_cap_v2')
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ))
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a , layers_per_block=1 , upcast_attention=a , use_linear_projection=a , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = DDIMScheduler(
            beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=a , steps_offset=1 , )
torch.manual_seed(0)
SCREAMING_SNAKE_CASE = AutoencoderKL()
SCREAMING_SNAKE_CASE = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def SCREAMING_SNAKE_CASE__ ( self , a , a=0 , a=True) -> Tuple:
if str(a).startswith('mps'):
SCREAMING_SNAKE_CASE = torch.manual_seed(a)
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=a).manual_seed(a)
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(a)).to(a)
if pil_image:
SCREAMING_SNAKE_CASE = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = input_image.clamp(0 , 1)
SCREAMING_SNAKE_CASE = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
SCREAMING_SNAKE_CASE = DiffusionPipeline.numpy_to_pil(a)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = StableUnCLIPImgaImgPipeline(**a)
SCREAMING_SNAKE_CASE = sd_pipe.to(a)
sd_pipe.set_progress_bar_config(disable=a)
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(a)
inputs.update({'image_embeds': None})
SCREAMING_SNAKE_CASE = sd_pipe(**a).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
        SCREAMING_SNAKE_CASE = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=a)
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=a)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=a)
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy')
SCREAMING_SNAKE_CASE = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
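        # sequential CPU offload keeps each submodule on the GPU only while it is
        # executing, trading inference speed for a much smaller peak memory footprint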
SCREAMING_SNAKE_CASE = torch.Generator(device='cpu').manual_seed(0)
        SCREAMING_SNAKE_CASE = pipe(a , 'anime turtle' , generator=a , output_type='np')
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a , a)
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy')
SCREAMING_SNAKE_CASE = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa)
pipe.to(a)
pipe.set_progress_bar_config(disable=a)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE = torch.Generator(device='cpu').manual_seed(0)
        SCREAMING_SNAKE_CASE = pipe(a , 'anime turtle' , generator=a , output_type='np')
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a , a)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png')
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa)
SCREAMING_SNAKE_CASE = pipe.to(a)
pipe.set_progress_bar_config(disable=a)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE = pipe(
a , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 137 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ : List[Any] = logging.get_logger(__name__)
a_ : str = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class _snake_case ( A__ ):
_lowercase : Any = '''deberta-v2'''
def __init__( self , a=12_8100 , a=1536 , a=24 , a=24 , a=6144 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=0 , a=0.02 , a=1E-7 , a=False , a=-1 , a=0 , a=True , a=None , a=0 , a="gelu" , **a , ) -> List[Any]:
super().__init__(**a)
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = relative_attention
SCREAMING_SNAKE_CASE = max_relative_positions
SCREAMING_SNAKE_CASE = pad_token_id
SCREAMING_SNAKE_CASE = position_biased_input
# Backwards compatibility
if type(a) == str:
SCREAMING_SNAKE_CASE = [x.strip() for x in pos_att_type.lower().split('|')]
SCREAMING_SNAKE_CASE = pos_att_type
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = kwargs.get('pooler_hidden_size' , a)
SCREAMING_SNAKE_CASE = pooler_dropout
SCREAMING_SNAKE_CASE = pooler_hidden_act
class _snake_case ( A__ ):
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return 12
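    # the property above gives the default ONNX opset; DeBERTa-v2 export is
    # assumed to require at least opset 12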
def SCREAMING_SNAKE_CASE__ ( self , a , a = -1 , a = -1 , a = -1 , a = False , a = None , a = 3 , a = 40 , a = 40 , a = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE = super().generate_dummy_inputs(preprocessor=a , framework=a)
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 137 | 1 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def _a ( _lowercase : Optional[Any] , _lowercase : Any , _lowercase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Dict = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
__UpperCAmelCase : Union[str, Any] = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
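    # the replacement patterns above rewrite PyTorch state-dict names into the
    # original TF checkpoint naming scheme, e.g. "layer.0" -> "layer_0" and
    # "weight" -> "kernel"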
if not os.path.isdir(UpperCAmelCase__ ):
os.makedirs(UpperCAmelCase__ )
__UpperCAmelCase : List[Any] = model.state_dict()
def to_tf_var_name(_lowercase : Union[str, Any] ):
for patt, repl in iter(UpperCAmelCase__ ):
__UpperCAmelCase : Any = name.replace(UpperCAmelCase__ , UpperCAmelCase__ )
return F'bert/{name}'
def create_tf_var(_lowercase : int , _lowercase : Tuple , _lowercase : Optional[Any] ):
__UpperCAmelCase : List[Any] = tf.dtypes.as_dtype(tensor.dtype )
__UpperCAmelCase : Tuple = tf.get_variable(dtype=UpperCAmelCase__ , shape=tensor.shape , name=UpperCAmelCase__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(UpperCAmelCase__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
__UpperCAmelCase : Union[str, Any] = to_tf_var_name(UpperCAmelCase__ )
__UpperCAmelCase : Dict = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
__UpperCAmelCase : Optional[Any] = torch_tensor.T
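                # PyTorch Linear weights are (out_features, in_features) while TF
                # kernels are (in_features, out_features), hence the transpose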
__UpperCAmelCase : List[str] = create_tf_var(tensor=UpperCAmelCase__ , name=UpperCAmelCase__ , session=UpperCAmelCase__ )
tf.keras.backend.set_value(UpperCAmelCase__ , UpperCAmelCase__ )
__UpperCAmelCase : List[str] = session.run(UpperCAmelCase__ )
print(F'Successfully created {tf_name}: {np.allclose(UpperCAmelCase__ , UpperCAmelCase__ )}' )
__UpperCAmelCase : Any = tf.train.Saver(tf.trainable_variables() )
saver.save(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def _a ( _lowercase : int=None ):
'''simple docstring'''
__UpperCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=UpperCAmelCase__ , required=UpperCAmelCase__ , help='''Directory in which to save tensorflow model''' )
__UpperCAmelCase : Any = parser.parse_args(UpperCAmelCase__ )
__UpperCAmelCase : Dict = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=UpperCAmelCase__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main() | 363 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
"""simple docstring"""
def __init__( self : Any , snake_case : Any , snake_case : Optional[int]=13 , snake_case : List[str]=7 , snake_case : List[str]=True , snake_case : List[Any]=True , snake_case : int=True , snake_case : Tuple=True , snake_case : int=99 , snake_case : Any=16 , snake_case : Dict=36 , snake_case : Any=6 , snake_case : Dict=6 , snake_case : Dict=6 , snake_case : int=37 , snake_case : int="gelu" , snake_case : str=0.1 , snake_case : Any=0.1 , snake_case : Dict=512 , snake_case : List[Any]=16 , snake_case : Any=2 , snake_case : Any=0.02 , snake_case : Optional[int]=3 , snake_case : List[Any]=4 , snake_case : List[str]=None , ) -> Union[str, Any]:
__UpperCAmelCase : str = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : int = seq_length
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : List[str] = use_input_mask
__UpperCAmelCase : List[Any] = use_token_type_ids
__UpperCAmelCase : Dict = use_labels
__UpperCAmelCase : int = vocab_size
__UpperCAmelCase : Optional[int] = embedding_size
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : List[Any] = num_hidden_groups
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[int] = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : Dict = num_labels
__UpperCAmelCase : str = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : int = None
if self.use_input_mask:
__UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Dict = None
if self.use_token_type_ids:
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : int = None
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCamelCase__ ( self : Tuple , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : int ) -> Optional[int]:
__UpperCAmelCase : List[Any] = AlbertModel(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Tuple = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
__UpperCAmelCase : List[str] = model(snake_case , token_type_ids=snake_case )
__UpperCAmelCase : str = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self : List[str] , snake_case : str , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Dict , snake_case : Optional[int] ) -> Optional[int]:
__UpperCAmelCase : str = AlbertForPreTraining(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , sentence_order_label=snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase__ ( self : Dict , snake_case : Union[str, Any] , snake_case : Dict , snake_case : Any , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Any , snake_case : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Dict = AlbertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , snake_case : Tuple , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Tuple ) -> int:
__UpperCAmelCase : Optional[Any] = AlbertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Optional[Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Tuple , snake_case : List[str] , snake_case : Dict , snake_case : Optional[int] , snake_case : Dict , snake_case : int , snake_case : Optional[int] , snake_case : Optional[Any] ) -> Any:
__UpperCAmelCase : Optional[int] = self.num_labels
__UpperCAmelCase : Any = AlbertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , snake_case : Tuple , snake_case : List[Any] , snake_case : Optional[int] , snake_case : str , snake_case : Dict , snake_case : Union[str, Any] , snake_case : List[str] ) -> int:
__UpperCAmelCase : Optional[int] = self.num_labels
__UpperCAmelCase : Optional[int] = AlbertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , snake_case : Tuple , snake_case : List[Any] , snake_case : Dict , snake_case : int , snake_case : List[Any] , snake_case : List[Any] , snake_case : Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[int] = self.num_choices
__UpperCAmelCase : List[Any] = AlbertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
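        # tile every input across a new choices dimension: (batch, num_choices, seq_len)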
__UpperCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[str] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( _a , _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Any = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Any = True
def lowerCamelCase__ ( self : Optional[int] , snake_case : Any , snake_case : Dict , snake_case : Tuple=False ) -> Optional[Any]:
__UpperCAmelCase : Any = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
__UpperCAmelCase : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case )
__UpperCAmelCase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def lowerCamelCase__ ( self : Dict ) -> int:
__UpperCAmelCase : List[Any] = AlbertModelTester(self )
__UpperCAmelCase : Any = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Any ) -> Any:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def lowerCamelCase__ ( self : Dict ) -> str:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def lowerCamelCase__ ( self : str ) -> Any:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Tuple = type
self.model_tester.create_and_check_model(*snake_case )
@slow
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[Any] = AlbertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[int] = AlbertModel.from_pretrained('''albert-base-v2''' )
__UpperCAmelCase : str = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__UpperCAmelCase : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(snake_case , attention_mask=snake_case )[0]
__UpperCAmelCase : Any = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case )
__UpperCAmelCase : int = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case , atol=1E-4 ) ) | 240 | 0 |
def solution(limit: int = 1_000_000) -> int:
    '''Return the sum of Euler's totient phi(n) for 2 <= n <= limit.'''
    # sieve of Eratosthenes restricted to odd numbers, plus 2
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # phi(n) = n * prod(1 - 1/p) over the distinct prime factors p of n
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
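# For limit = 8 the function returns 21: there are 21 reduced proper fractions
# with denominator <= 8, matching the Project Euler example.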
if __name__ == "__main__":
print(F'{solution() = }')
| 101 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar('''T''')
U = TypeVar('''U''')
class DoubleLinkedListNode(Generic[T, U] ):
    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self):
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next)}, has prev: {bool(self.prev)}"""
)
class DoubleLinkedList(Generic[T, U] ):
    def __init__(self):
        # head and rear are sentinel nodes; real entries live between them,
        # ordered from least recently used (front) to most recently used (back)
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head
    def __repr__(self):
        rep = ['DoubleLinkedList']
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)
    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        node.next = self.rear
        self.rear.prev = node
    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U] ):
    # class variable mapping each decorated function to its LRUCache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self):
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self , a):
return key in self.cache
    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None
    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
    @classmethod
    def decorator(cls, size: int = 128):
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0] , result)
                return result
            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]
            setattr(cache_decorator_wrapper , 'cache_info' , cache_info)  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 214 | 0 |
def perfect_cube(n: int) -> bool:
    '''Return True if `n` is a perfect cube, e.g. 27 == 3 ** 3.'''
    val = round(n ** (1 / 3))
    return val ** 3 == n
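# round() is needed because floating-point cube roots are inexact: 27 ** (1 / 3)
# evaluates to 3.0000000000000004, so an exact comparison would misclassify 27.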
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4)) | 151 |
g = 9.80665  # standard acceleration due to gravity, in m/s^2
def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    '''Buoyant force on a submerged object: fluid density * gravity * displaced volume, in newtons.'''
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
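# Quick sanity check (SI units): half a cubic metre of water (997 kg/m^3)
# experiences about 997 * 9.80665 * 0.5 ≈ 4888.6 N of buoyant force.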
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod() | 151 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 108 |
def bfs(graph, source, sink, parent):
    # Return True if there is an augmenting path from source to sink in the
    # residual graph, recording the path in `parent`.
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and stores the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('''Inf''')
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities of forward and reverse edges along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
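# Example capacity matrix: the classic 6-node network-flow example (CLRS);
# the maximum flow from node 0 to node 5 in this graph is 23.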
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
| 259 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False
    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        # expand a node into its in-bounds, non-obstacle neighbours
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors
    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False
    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest
    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("""Unidirectional BFS computation time : """, bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 161 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 161 | 1 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=64 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ):
_lowerCamelCase : List[Any] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : List[str] = seq_length
_lowerCamelCase : Dict = is_training
_lowerCamelCase : int = use_input_mask
_lowerCamelCase : List[Any] = use_token_type_ids
_lowerCamelCase : int = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : str = hidden_size
_lowerCamelCase : Any = embedding_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : List[Any] = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : Any = max_position_embeddings
_lowerCamelCase : List[str] = type_vocab_size
_lowerCamelCase : List[Any] = type_sequence_label_size
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : str = num_labels
_lowerCamelCase : int = num_choices
_lowerCamelCase : Any = scope
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : int = None
if self.use_input_mask:
_lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCamelCase : Dict = None
_lowerCamelCase : Any = None
_lowerCamelCase : Union[str, Any] = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Union[str, Any] = MobileBertModel(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Optional[int] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )
_lowerCamelCase : Union[str, Any] = model(lowercase , token_type_ids=lowercase )
_lowerCamelCase : List[str] = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Union[str, Any] = MobileBertForMaskedLM(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : List[Any] = MobileBertForNextSentencePrediction(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : List[str] = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : str = MobileBertForPreTraining(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : int = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , next_sentence_label=lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Any = MobileBertForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : str = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = self.num_labels
_lowerCamelCase : List[str] = MobileBertForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : str = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Dict = self.num_labels
_lowerCamelCase : Optional[int] = MobileBertForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : List[str] = self.num_choices
_lowerCamelCase : Optional[int] = MobileBertForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
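        # tile every input across a new choices dimension: (batch, num_choices, seq_len)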
_lowerCamelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Tuple = model(
lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
def A_ ( self , lowercase , lowercase , lowercase=False ):
_lowerCamelCase : Optional[int] = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if model_class in get_values(lowercase ):
_lowerCamelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase )
_lowerCamelCase : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase )
return inputs_dict
def A_ ( self ):
_lowerCamelCase : int = MobileBertModelTester(self )
_lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def A_ ( self ):
self.config_tester.run_common_tests()
def A_ ( self ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowercase )
def A_ ( self ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowercase )
def A_ ( self ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowercase )
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowercase )
def A_ ( self ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowercase )
def A_ ( self ):
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowercase )
def A_ ( self ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowercase )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : Optional[int] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(lowercase )
_lowerCamelCase : Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
_lowerCamelCase : Any = model(lowercase )[0]
_lowerCamelCase : Tuple = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , lowercase )
_lowerCamelCase : List[Any] = torch.tensor(
[
[
                    [-2.4736526E07, 8.2691656E04, 1.6521838E05],
                    [-5.7541704E-01, 3.9056022E00, 4.4011507E00],
                    [2.6047359E00, 1.5677652E00, -1.7324188E-01],
]
] , device=lowercase , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
_lowerCamelCase : Any = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
_lowerCamelCase : str = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound ) | 96 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase : List[str] = logging.get_logger(__name__)
class lowerCAmelCase__ ( a , a ):
"""simple docstring"""
lowerCAmelCase__ = "maskformer-swin"
lowerCAmelCase__ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self.out_features , self.out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
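# Quick sanity check of the hidden_size computation above with the default
# hyper-parameters (embed_dim=96, depths=[2, 2, 6, 2]); purely illustrative:
assert int(96 * 2 ** (len([2, 2, 6, 2] ) - 1) ) == 768  # channel dim after the last Swin stage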
| 267 | 0 |
import math
def solution( n : int = 1_00 ):
    '''Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers.'''
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
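    # Cross-check via the Gauss closed forms (an illustrative alternative to
    # the direct summation above, not part of the original solution):
    n = 1_00
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    assert solution(n ) == square_of_sum - sum_of_squares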
| 192 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
_UpperCAmelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights_wavaveca( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' ,)
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
            load_adapter(name , value , adapter , unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowercase )
def load_adapter( full_name , value , adapter , unused_weights ):
    name = full_name.split('adaptor.' )[-1]
    items = name.split('.' )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' )
    elif isinstance(layer_id , int ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(__lowercase )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
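# A quick illustration of the weight tying performed by make_linear_from_emb:
# the returned projection shares storage with the embedding matrix, so logits
# are dot products against the embedding rows (shapes below are illustrative):
#
#     emb = nn.Embedding(10, 4)
#     lm_head = make_linear_from_emb(emb)
#     assert lm_head.weight.shape == (10, 4)  # (vocab_size, emb_size)
#     assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # shared storage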
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim ,):
    # load configs
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path , add_adapter=add_adapter , adapter_stride=adapter_stride , adapter_kernel_size=adapter_kernel_size , use_auth_token=True , output_hidden_size=encoder_output_dim ,)
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/' )[:-1] ),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        } ,)
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path , use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'mbart50'
    config['feature_extractor_type'] = 'wav2vec2'
    config['decoder_start_token_id'] = tokenizer.eos_token_id
    config['forced_bos_token_id'] = 25_00_04
    config['forced_eos_token_id'] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
_UpperCAmelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 192 | 1 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class _a ( unittest.TestCase):
"""simple docstring"""
    @slow
    def UpperCAmelCase_ ( self ):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base" )
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
        output = model(input_ids )["last_hidden_state"]
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3 ) )
| 149 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : int = '''yolos'''
    def __init__(self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=[5_12, 8_64] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=1_00 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Dict = version.parse('''1.11''' )
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __magic_name__ (self ) -> float:
"""simple docstring"""
return 1E-4
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return 12
| 25 | 0 |
from __future__ import annotations
def extended_euclid(a: int , b: int ) -> tuple[int, int]:
    '''Return (x, y) such that a * x + b * y == gcd(a, b).'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b , a % b )
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(na: int , ra: int , nb: int , rb: int ) -> int:
    '''Return the unique x modulo na * nb with x = ra (mod na) and x = rb (mod nb).'''
    (x, y) = extended_euclid(na , nb )
    m = na * nb
    n = rb * x * na + ra * y * nb
    return (n % m + m) % m


def invert_modulo(a: int , n: int ) -> int:
    '''Return the multiplicative inverse of a modulo n.'''
    (b, x) = extended_euclid(a , n )
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(na: int , ra: int , nb: int , rb: int ) -> int:
    '''Same result as chinese_remainder_theorem, built from modular inverses.'''
    x, y = invert_modulo(na , nb ), invert_modulo(nb , na )
    m = na * nb
    n = rb * x * na + ra * y * nb
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
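    # Worked example (illustrative): the unique x modulo 35 with
    # x = 1 (mod 5) and x = 3 (mod 7) is 31; both constructions agree.
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31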
| 368 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
lowercase : Union[str, Any] ="""naver-clova-ix/donut-base-finetuned-docvqa"""
lowercase : Optional[int] =(
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
lowercase : Dict ="""document_qa"""
lowercase : List[Any] =AutoProcessor
lowercase : Tuple =VisionEncoderDecoderModel
lowercase : Optional[int] =["""image""", """text"""]
lowercase : Any =["""text"""]
    def __init__( self , *args , **kwargs ):
        if not is_vision_available():
            raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
        super().__init__(*args , **kwargs )
    def encode( self , document , question ):
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        prompt = task_prompt.replace('''{user_input}''' , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors='''pt''' ).input_ids
        pixel_values = self.pre_processor(document , return_tensors='''pt''' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self , inputs ):
        return self.model.generate(
            inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences
    def decode( self , outputs ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
        sequence = re.sub(R'''<.*?>''' , '''''' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
| 252 | 0 |
'''simple docstring'''
def snake_case__ ( numa: int , numb: int ) -> bool:
    '''Return True when numa and numb have opposite signs.'''
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
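    # Illustrative checks: XOR sets the sign bit exactly when the operands'
    # sign bits differ, so `numa ^ numb < 0` detects opposite signs.
    assert snake_case__(-10, 3) is True
    assert snake_case__(10, 3) is False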
| 272 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = (IPNDMScheduler,)
__lowerCamelCase = (('num_inference_steps', 50),)
def UpperCAmelCase ( self :Optional[int] , **_lowercase :Dict ):
'''simple docstring'''
lowercase__ = {"num_train_timesteps": 10_00}
config.update(**_lowercase )
return config
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Dict=0 , **_lowercase :Optional[Any] ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps" , _lowercase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**_lowercase )
lowercase__ = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
lowercase__ = scheduler_class.from_pretrained(_lowercase )
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
lowercase__ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
lowercase__ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
pass
def UpperCAmelCase ( self :Tuple , _lowercase :Any=0 , **_lowercase :Any ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps" , _lowercase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_lowercase )
scheduler.set_timesteps(_lowercase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_lowercase )
lowercase__ = scheduler_class.from_pretrained(_lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_lowercase )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
lowercase__ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
lowercase__ = new_scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self :Optional[Any] , **_lowercase :List[str] ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_lowercase )
lowercase__ = scheduler_class(**_lowercase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_lowercase , _lowercase )
lowercase__ = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_lowercase , _lowercase )
lowercase__ = scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
return sample
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps" , _lowercase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_lowercase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_lowercase , "set_timesteps" ):
scheduler.set_timesteps(_lowercase )
elif num_inference_steps is not None and not hasattr(_lowercase , "set_timesteps" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
lowercase__ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowercase__ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
lowercase__ = scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_lowercase , time_step=_lowercase )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=_lowercase , time_step=_lowercase )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(_lowercase ) )
assert abs(result_mean.item() - 2_54_05_29 ) < 10
| 201 |
import math
def check_partition_perfect(positive_integer: int ) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )


def solution(max_proportion: float = 1 / 1_2345 ) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate )
        integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
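    # Illustrative spot checks for check_partition_perfect: k = 2 gives
    # x = 2 (a power of two, so perfect), while k = 6 gives x = 3 (a valid
    # integer partition, but not perfect).
    assert check_partition_perfect(2 )
    assert not check_partition_perfect(6 )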
| 201 | 1 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray( rgb : np.ndarray ):
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b


def gray_to_binary( gray : np.ndarray ):
    return (gray > 127) & (gray <= 255)


def dilation( image : np.ndarray , kernel : np.ndarray ):
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
# read original image
a : Optional[Any] = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
a : Dict = np.array(Image.open(lena_path))
# kernel to be applied
a : Any = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
a : Optional[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
a : Union[str, Any] = Image.fromarray(output).convert("RGB")
pil_img.save("result_dilation.png")
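    # The 0.2989/0.5870/0.1140 weights in rgb_to_gray are the ITU-R BT.601 luma
    # coefficients; a pure white pixel maps to ~255 (illustrative check):
    white = np.array([[[255, 255, 255]]])
    assert abs(float(rgb_to_gray(white)[0, 0]) - 254.9745) < 1e-6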
| 114 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowercase__ )
class a ( lowercase__ ):
"""simple docstring"""
a : str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
a : ClassVar[Features] = Features({'audio': Audio()} )
a : ClassVar[Features] = Features({'transcription': Value('string' )} )
a : str = "audio"
a : str = "transcription"
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Optional[int] ) -> str:
if self.audio_column not in features:
raise ValueError(f"""Column {self.audio_column} is not present in features.""" )
if not isinstance(features[self.audio_column] , __lowercase ):
raise ValueError(f"""Column {self.audio_column} is not an Audio type.""" )
__UpperCAmelCase : int = copy.deepcopy(self )
__UpperCAmelCase : str = self.input_schema.copy()
__UpperCAmelCase : List[str] = features[self.audio_column]
__UpperCAmelCase : Optional[Any] = input_schema
return task_template
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 114 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : List[Any] = '''marian'''
__UpperCAmelCase : int = ['''past_key_values''']
__UpperCAmelCase : str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=5_8101 , decoder_vocab_size=None , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=5_8100 , scale_embedding=False , pad_token_id=5_8100 , eos_token_id=0 , forced_eos_token_id=0 , share_encoder_decoder_embeddings=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class _lowerCAmelCase ( snake_case_ ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case : Any = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
snake_case : Dict = {0: "batch"}
snake_case : str = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
snake_case : str = {0: "batch", 1: "decoder_sequence"}
snake_case : str = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case : Tuple = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
snake_case ,snake_case : List[str] = self.num_layers
for i in range(UpperCamelCase__ ):
snake_case : Tuple = {0: "batch", 2: "past_sequence + sequence"}
snake_case : Dict = {0: "batch", 2: "past_sequence + sequence"}
else:
snake_case : Optional[int] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case : str = super().outputs
else:
snake_case : Tuple = super(UpperCamelCase__ , self ).outputs
if self.use_past:
snake_case ,snake_case : Dict = self.num_layers
for i in range(UpperCamelCase__ ):
snake_case : int = {0: "batch", 2: "past_sequence + sequence"}
snake_case : int = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
snake_case : List[Any] = seq_length if not self.use_past else 1
snake_case : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[Any] = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
snake_case : Dict = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
snake_case ,snake_case : Optional[Any] = common_inputs["input_ids"].shape
snake_case : Optional[Any] = common_inputs["decoder_input_ids"].shape[1]
snake_case ,snake_case : List[Any] = self.num_attention_heads
snake_case : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case : Any = decoder_seq_length + 3
snake_case : Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case : Tuple = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
snake_case : Tuple = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case ,snake_case : Dict = self.num_layers
snake_case : Dict = min(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[Any] = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
snake_case : Optional[int] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
snake_case : Union[str, Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
snake_case ,snake_case : List[str] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
snake_case : List[str] = seqlen + 2
snake_case ,snake_case : Tuple = self.num_layers
snake_case ,snake_case : Optional[Any] = self.num_attention_heads
snake_case : List[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case : int = common_inputs["attention_mask"].dtype
snake_case : List[str] = torch.cat(
[common_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
snake_case : Union[str, Any] = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case : Optional[Any] = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case : Optional[Any] = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
snake_case : Union[str, Any] = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
snake_case : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case : Optional[int] = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case : int = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
snake_case : Optional[Any] = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case : Union[str, Any] = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
snake_case : int = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@property
def lowerCamelCase ( self ) -> float:
'''simple docstring'''
return 1e-4
| 112 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__snake_case = data_utils.TransfoXLTokenizer
__snake_case = data_utils.TransfoXLCorpus
__snake_case = data_utils
__snake_case = data_utils
def convert_transfo_xl_checkpoint_to_pytorch( tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file ) -> None:
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , "rb" ) as fp:
            corpus = pickle.load(fp , encoding="latin1" )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab" , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(F'Save dataset to {pytorch_dataset_dump_path}' )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F'Building PyTorch model from configuration: {config}' )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(F'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}' )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F'Save configuration file to {os.path.abspath(pytorch_config_dump_path )}' )
        with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
__snake_case = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 112 | 1 |