| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 86 to 54.5k) | int64 (0 to 371) | string (length 87 to 49.2k) | int64 (0 to 349) | int64 (0 to 1) |
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the two arrays merged together.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([1, 2], [3, 4])
    2.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 21 |
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series as strings: ["1", "1/2", "1/3", ..., "1/n"]."""
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series: ")
    print("Formula of Harmonic Series => 1 + 1/2 + 1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 63 | 0 |
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."

    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
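# Launch sketch (assuming a machine with at least 2 GPUs): the checks in the `__main__`
# block above are exercised when this file is run under torchrun, e.g.
#
#   torchrun --nproc_per_node=2 <path_to_this_file>
#
# which is what `test_pad_across_processes` does through `execute_subprocess_async`.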
| 368 |
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        # insert the new node at the head of the list
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        # swap the data of the nodes holding node_data_1 and node_data_2 (if both exist)
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 189 | 0 |
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative integer value found in `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
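# Usage sketch (function names as reconstructed above; the environment variable names are illustrative):
#
#   os.environ["MY_FLAG"] = "yes"
#   parse_flag_from_env("MY_FLAG")                       # -> True
#   get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)  # -> 1 unless one of the variables is set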
| 339 |
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head placed on top of a transformer encoder."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
| 71 | 0 |
class Graph:
    def __init__(self):
        # adjacency list: vertex -> list of neighbouring vertices
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int):
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited list to keep track of already printed vertices
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list):
        # mark the current vertex as visited and print it
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
| 367 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 90 | 0 |
"""simple docstring"""
from cv2 import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 25 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : int = '''yolos'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[5_12, 8_64] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[str] = image_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = num_detection_tokens
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_mid_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = auxiliary_loss
# Hungarian matcher
SCREAMING_SNAKE_CASE__ : Optional[Any] = class_cost
SCREAMING_SNAKE_CASE__ : List[str] = bbox_cost
SCREAMING_SNAKE_CASE__ : List[Any] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ : List[str] = giou_loss_coefficient
SCREAMING_SNAKE_CASE__ : int = eos_coefficient
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Dict = version.parse('''1.11''' )
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __magic_name__ (self ) -> float:
"""simple docstring"""
return 1E-4
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return 12
| 25 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__: str = logging.get_logger(__name__)
__magic_name__: Any = {
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Union[str, Any] = '''nllb-moe'''
lowercase__ : Optional[Any] = ['''past_key_values''']
lowercase__ : Union[str, Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , lowerCAmelCase__=12_81_12 , lowerCAmelCase__=10_24 , lowerCAmelCase__=12 , lowerCAmelCase__=40_96 , lowerCAmelCase__=16 , lowerCAmelCase__=12 , lowerCAmelCase__=40_96 , lowerCAmelCase__=16 , lowerCAmelCase__=0.0_5 , lowerCAmelCase__=0.0_5 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__="relu" , lowerCAmelCase__=10_24 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=2 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="float32" , lowerCAmelCase__=False , lowerCAmelCase__=1_28 , lowerCAmelCase__=64 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__=0.0_0_1 , lowerCAmelCase__=0.0_0_1 , lowerCAmelCase__="all" , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=1.0 , lowerCAmelCase__=0.2 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Union[str, Any]:
__magic_name__ : Union[str, Any] = vocab_size
__magic_name__ : List[str] = max_position_embeddings
__magic_name__ : Optional[int] = d_model
__magic_name__ : Dict = encoder_ffn_dim
__magic_name__ : Optional[Any] = encoder_layers
__magic_name__ : Optional[Any] = encoder_attention_heads
__magic_name__ : int = decoder_ffn_dim
__magic_name__ : List[str] = decoder_layers
__magic_name__ : List[Any] = decoder_attention_heads
__magic_name__ : Union[str, Any] = dropout
__magic_name__ : Optional[int] = attention_dropout
__magic_name__ : Optional[Any] = activation_dropout
__magic_name__ : Union[str, Any] = activation_function
__magic_name__ : int = init_std
__magic_name__ : Any = encoder_layerdrop
__magic_name__ : List[Any] = decoder_layerdrop
__magic_name__ : Tuple = use_cache
__magic_name__ : str = encoder_layers
__magic_name__ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
__magic_name__ : Dict = router_z_loss_coef
__magic_name__ : Any = router_aux_loss_coef
__magic_name__ : Dict = decoder_sparse_step
__magic_name__ : Optional[int] = encoder_sparse_step
__magic_name__ : List[Any] = num_experts
__magic_name__ : Optional[int] = expert_capacity
__magic_name__ : str = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
__magic_name__ : List[Any] = router_dtype
__magic_name__ : Optional[Any] = router_ignore_padding_tokens
__magic_name__ : Optional[int] = batch_prioritized_routing
__magic_name__ : Union[str, Any] = second_expert_policy
__magic_name__ : List[Any] = normalize_router_prob_before_dropping
__magic_name__ : int = moe_eval_capacity_token_fraction
__magic_name__ : int = moe_token_dropout
__magic_name__ : Tuple = output_router_logits
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
| 138 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__magic_name__: str = logging.getLogger(__name__)
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : int = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""", type=_A, default="""data/dump.txt""", help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""", type=_A, default="""bert""", choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""", type=_A, default="""bert-base-uncased""", help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""", type=_A, default="""data/dump""", help="""The dump file prefix.""" )
__magic_name__ : Dict = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
__magic_name__ : Tuple = BertTokenizer.from_pretrained(args.tokenizer_name )
__magic_name__ : List[Any] = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
__magic_name__ : Optional[int] = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
__magic_name__ : Optional[int] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
__magic_name__ : List[Any] = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
__magic_name__ : Any = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
__magic_name__ : Any = GPTaTokenizer.from_pretrained(args.tokenizer_name )
__magic_name__ : int = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
__magic_name__ : Optional[int] = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
with open(args.file_path, """r""", encoding="""utf8""" ) as fp:
__magic_name__ : Tuple = fp.readlines()
logger.info("""Start encoding""" )
logger.info(f'{len(_A )} examples to process.' )
__magic_name__ : List[Any] = []
__magic_name__ : str = 0
__magic_name__ : str = 10000
__magic_name__ : Dict = time.time()
for text in data:
__magic_name__ : Tuple = f'{bos} {text.strip()} {sep}'
__magic_name__ : Optional[int] = tokenizer.encode(_A, add_special_tokens=_A )
rslt.append(_A )
iter += 1
if iter % interval == 0:
__magic_name__ : Union[str, Any] = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
__magic_name__ : Any = time.time()
logger.info("""Finished binarization""" )
logger.info(f'{len(_A )} examples processed.' )
__magic_name__ : Tuple = f'{args.dump_file}.{args.tokenizer_name}.pickle'
__magic_name__ : Tuple = tokenizer.vocab_size
if vocab_size < (1 << 16):
__magic_name__ : Optional[int] = [np.uintaa(_A ) for d in rslt]
else:
__magic_name__ : str = [np.intaa(_A ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'Dump to {dp_file}' )
with open(_A, """wb""" ) as handle:
pickle.dump(rslt_, _A, protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 138 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A =get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( A__ ,unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PegasusTokenizer
UpperCamelCase = PegasusTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Any = PegasusTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self : List[str] ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def snake_case__ ( self : Union[str, Any] , **a_ : Optional[int] ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_A )
def snake_case__ ( self : List[Any] , a_ : Union[str, Any] ):
'''simple docstring'''
return ("This is a test", "This is a test")
def snake_case__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : int = "</s>"
__UpperCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(_A ) , 11_03 )
def snake_case__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def snake_case__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Optional[int] = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
__UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=_A , add_special_tokens=_A ).input_ids[0]
__UpperCAmelCase : Tuple = py_tokenizer([raw_input_str] , return_tensors=_A , add_special_tokens=_A ).input_ids[0]
self.assertListEqual(_A , _A )
def snake_case__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__UpperCAmelCase : Union[str, Any] = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
__UpperCAmelCase : List[str] = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
__UpperCAmelCase : Dict = tokenizer([raw_input_str] , return_tensors=_A ).input_ids[0]
self.assertListEqual(_A , _A )
def snake_case__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
__UpperCAmelCase : str = "To ensure a smooth flow of bank resolutions."
__UpperCAmelCase : Dict = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
__UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=_A ).input_ids[0]
self.assertListEqual(_A , _A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = ["This is going to be way too long." * 1_50, "short example"]
__UpperCAmelCase : Any = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : Tuple = self._large_tokenizer(_A , padding=_A , truncation=_A , return_tensors='''pt''' )
__UpperCAmelCase : Union[str, Any] = self._large_tokenizer(
text_target=_A , max_length=5 , padding=_A , truncation=_A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(_A ) == 2 # input_ids, attention_mask.
@slow
def snake_case__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Tuple = {"input_ids": [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( A__ ,unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = PegasusTokenizer
UpperCamelCase = PegasusTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Optional[Any] = PegasusTokenizer(_A , offset=0 , mask_token_sent=_A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self : List[str] ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def snake_case__ ( self : List[str] , **a_ : List[str] ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_A )
def snake_case__ ( self : List[str] , a_ : str ):
'''simple docstring'''
return ("This is a test", "This is a test")
def snake_case__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : List[Any] = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
__UpperCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=_A , add_special_tokens=_A ).input_ids[0]
__UpperCAmelCase : Dict = py_tokenizer([raw_input_str] , return_tensors=_A , add_special_tokens=_A ).input_ids[0]
self.assertListEqual(_A , _A )
@require_torch
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Dict = ["This is going to be way too long." * 10_00, "short example"]
__UpperCAmelCase : Dict = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : int = self._large_tokenizer(_A , padding=_A , truncation=_A , return_tensors='''pt''' )
__UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=_A , max_length=5 , padding=_A , truncation=_A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(_A ) == 2 # input_ids, attention_mask.
def snake_case__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
__UpperCAmelCase : Any = self._large_tokenizer(_A ).input_ids
self.assertListEqual(
_A , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
| 226 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive version of the Euclidean GCD algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
| 18 | 0 |
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """Pipeline that returns the raw hidden states of the model as features."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
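# Usage sketch (assumes the standard `transformers.pipeline` factory; the model name is only an example):
#
#   from transformers import pipeline
#
#   extractor = pipeline(task="feature-extraction", model="distilbert-base-cased")
#   features = extractor("This is a simple test.")  # nested list of hidden states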
| 19 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Tuple = '''▁'''
a__ : List[Any] = {'''vocab_file''': '''spiece.model'''}
a__ : Optional[Any] = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}
}
a__ : str = {
'''google/pegasus-xsum''': 512,
}
a__ : str = logging.get_logger(__name__)
class a_ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase , _lowerCamelCase="<pad>" , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<mask_2>" , _lowerCamelCase="<mask_1>" , _lowerCamelCase=None , _lowerCamelCase=103 , _lowerCamelCase = None , **_lowerCamelCase , ) ->None:
SCREAMING_SNAKE_CASE : Dict = offset
if additional_special_tokens is not None:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError(
F"""additional_special_tokens should be of type {type(_lowerCamelCase )}, but is"""
F""" {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : List[Any] = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(_lowerCamelCase ) , self.offset - 1 )
]
if len(set(_lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
SCREAMING_SNAKE_CASE : Dict = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE : str = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , mask_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token_sent=_lowerCamelCase , offset=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : List[str] = mask_token_sent
SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
SCREAMING_SNAKE_CASE : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
# add special tokens to encoder dict
SCREAMING_SNAKE_CASE : Dict[int, str] = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
SCREAMING_SNAKE_CASE : Dict[str, int] = {v: k for k, v in self.encoder.items()}
@property
def __lowerCAmelCase ( self ) ->int:
return len(self.sp_model ) + self.offset
def __lowerCAmelCase ( self ) ->Dict[str, int]:
SCREAMING_SNAKE_CASE : Union[str, Any] = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[int] = self.__dict__.copy()
SCREAMING_SNAKE_CASE : str = None
return state
def __setstate__( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : List[str] = {}
SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
SCREAMING_SNAKE_CASE : List[str] = self.sp_model.piece_to_id(_lowerCamelCase )
return sp_id + self.offset
def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
SCREAMING_SNAKE_CASE : Dict = self.sp_model.IdToPiece(index - self.offset )
return token
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : int = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCamelCase ) + token
SCREAMING_SNAKE_CASE : Optional[Any] = []
else:
current_sub_tokens.append(_lowerCamelCase )
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->str:
return 1
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : Dict = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) ->List[int]:
if already_has_special_tokens:
return self._special_token_mask(_lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(_lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
if not os.path.isdir(_lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : int = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : Tuple = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
| 19 | 1 |
"""simple docstring"""
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the P-Series as strings: ["1", "1 / 2^p", ..., "1 / n^p"]."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(nth_term):
        series.append(f"1 / {pow(temp + 1, power)}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series: "))
    power = int(input("Enter the power for P-Series: "))
    print("Formula of P-Series => 1 + 1/2^p + 1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
| 108 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCAmelCase__ = get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : Optional[Any] ="dummy_data"
a : int ="datasets"
a : Tuple =False
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = False , snake_case__ = True , snake_case__ = None , ):
"""simple docstring"""
lowerCAmelCase : Tuple = 0
lowerCAmelCase : int = dataset_name
lowerCAmelCase : List[Any] = cache_dir
lowerCAmelCase : List[str] = use_local_dummy_data
lowerCAmelCase : List[str] = config
# download_callbacks take a single url as input
lowerCAmelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowerCAmelCase : Tuple = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowerCAmelCase : Union[str, Any] = str(snake_case__ )
# to be downloaded
lowerCAmelCase : List[Any] = None
lowerCAmelCase : List[Any] = None
@property
def lowercase__ ( self ):
"""simple docstring"""
if self._dummy_file is None:
lowerCAmelCase : Any = self.download_dummy_data()
return self._dummy_file
@property
def lowercase__ ( self ):
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def lowercase__ ( self ):
"""simple docstring"""
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowerCAmelCase : str = cached_path(
snake_case__ , cache_dir=self.cache_dir , extract_compressed_file=snake_case__ , force_extract=snake_case__ )
return os.path.join(snake_case__ , self.dummy_file_name )
@property
def lowercase__ ( self ):
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowercase__ ( self ):
"""simple docstring"""
if self._bucket_url is None:
lowerCAmelCase : Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def lowercase__ ( self ):
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def lowercase__ ( self , snake_case__ , *snake_case__ ):
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowerCAmelCase : int = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowerCAmelCase : List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(snake_case__ , snake_case__ ):
return self.create_dummy_data_dict(snake_case__ , snake_case__ )
elif isinstance(snake_case__ , (list, tuple) ):
return self.create_dummy_data_list(snake_case__ , snake_case__ )
else:
return self.create_dummy_data_single(snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , *snake_case__ ):
"""simple docstring"""
return self.download_and_extract(snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
return self.download_and_extract(snake_case__ )
def lowercase__ ( self , snake_case__ , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return path
def lowercase__ ( self ):
"""simple docstring"""
return {}
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(snake_case__ , snake_case__ ):
for single_url in single_urls:
download_callback(snake_case__ )
else:
lowerCAmelCase : List[str] = single_urls
download_callback(snake_case__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Tuple = [os.path.join(snake_case__ , urllib.parse.quote_plus(Path(snake_case__ ).name ) ) for x in single_urls]
else:
lowerCAmelCase : int = single_urls
lowerCAmelCase : Any = os.path.join(snake_case__ , urllib.parse.quote_plus(Path(snake_case__ ).name ) )
lowerCAmelCase : Union[str, Any] = value
# make sure that values are unique
if all(isinstance(snake_case__ , snake_case__ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowerCAmelCase : Union[str, Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowerCAmelCase : Optional[Any] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , snake_case__ ) ) for url in data_url )
lowerCAmelCase : Any = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowerCAmelCase : int = [data_url[0]] * len(snake_case__ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(snake_case__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCAmelCase : Dict = os.path.join(snake_case__ , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(snake_case__ )
return dummy_data_list
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(snake_case__ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCAmelCase : Tuple = os.path.join(snake_case__ , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(snake_case__ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
def _iter_archive_members(snake_case__ ):
# this preserves the order of the members inside the ZIP archive
lowerCAmelCase : str = Path(self.dummy_file ).parent
lowerCAmelCase : Optional[Any] = path.relative_to(snake_case__ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowerCAmelCase : List[Any] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(snake_case__ )
lowerCAmelCase : List[Any] = Path(snake_case__ )
lowerCAmelCase : str = _iter_archive_members(snake_case__ ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(snake_case__ ).as_posix(), file_path.open("rb" )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : List[Any] = [paths]
for path in paths:
if os.path.isfile(snake_case__ ):
if os.path.basename(snake_case__ ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(snake_case__ ):
if os.path.basename(snake_case__ ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(snake_case__ ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(snake_case__ , snake_case__ )
| 108 | 1 |
'''simple docstring'''
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function that extends the tour from `pos` by backtracking."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n, or raise ValueError."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 356 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
__UpperCAmelCase = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def _snake_case ( A , A ) -> Optional[Any]:
lowerCAmelCase__ = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
lowerCAmelCase__ = int(re.match(R'''.*layer_(\d*).*''' , A )[1] )
layer_number -= 3
return F"""h.{layer_number}.""" + key
def _snake_case ( A ) -> Optional[int]:
if dtype == torch.bool:
return 1 / 8
lowerCAmelCase__ = re.search(R'''[^\d](\d+)$''' , str(A ) )
if bit_search is None:
raise ValueError(F"""`dtype` is not a valid dtype: {dtype}.""" )
lowerCAmelCase__ = int(bit_search.groups()[0] )
return bit_size // 8
def _snake_case ( A , A , A , A , A ) -> Dict:
# Construct model
if bloom_config_file == "":
lowerCAmelCase__ = BloomConfig()
else:
lowerCAmelCase__ = BloomConfig.from_json_file(A )
if shard_model:
lowerCAmelCase__ = os.listdir(A )
lowerCAmelCase__ = sorted(filter(lambda A : s.startswith('''layer''' ) and "model_00" in s , A ) )
lowerCAmelCase__ = {'''weight_map''': {}, '''metadata''': {}}
lowerCAmelCase__ = 0
lowerCAmelCase__ = None
lowerCAmelCase__ = BloomConfig()
for j, file in enumerate(A ):
print('''Processing file: {}'''.format(A ) )
lowerCAmelCase__ = None
for i in range(A ):
# load all TP files
lowerCAmelCase__ = file.replace('''model_00''' , F"""model_0{i}""" )
lowerCAmelCase__ = torch.load(os.path.join(A , A ) , map_location='''cpu''' )
# Rename keys in the transformers names
lowerCAmelCase__ = list(temp.keys() )
for key in keys:
lowerCAmelCase__ = temp.pop(A )
if tensors is None:
lowerCAmelCase__ = temp
else:
for key in tensors.keys():
if any(key.endswith(A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase__ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowerCAmelCase__ = torch.cat([tensors[key], temp[key]] , dim=A )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase__ = tensors[key] / pretraining_tp
torch.save(
A , os.path.join(
A , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(A ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowerCAmelCase__ = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowerCAmelCase__ = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(A ) ).zfill(5 ) )
lowerCAmelCase__ = BloomConfig()
lowerCAmelCase__ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
lowerCAmelCase__ = total_size
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(A , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCAmelCase__ = json.dumps(A , indent=2 , sort_keys=A ) + '''\n'''
f.write(A )
else:
lowerCAmelCase__ = BloomModel(A )
lowerCAmelCase__ = os.listdir(A )
lowerCAmelCase__ = sorted(filter(lambda A : s.startswith('''layer''' ) and "model_00" in s , A ) )
lowerCAmelCase__ = None
for i, file in enumerate(A ):
lowerCAmelCase__ = None
for i in range(A ):
# load all TP files
lowerCAmelCase__ = file.replace('''model_00''' , F"""model_0{i}""" )
lowerCAmelCase__ = torch.load(os.path.join(A , A ) , map_location='''cpu''' )
# Rename keys in the transformers names
lowerCAmelCase__ = list(temp.keys() )
for key in keys:
lowerCAmelCase__ = temp.pop(A )
if tensors is None:
lowerCAmelCase__ = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase__ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
lowerCAmelCase__ = torch.cat([tensors[key], temp[key]] , dim=A )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase__ = tensors[key] / pretraining_tp
lowerCAmelCase__ = model.load_state_dict(A , strict=A )
assert not other_keys.unexpected_keys, F"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
lowerCAmelCase__ = set(other_keys.missing_keys )
else:
lowerCAmelCase__ = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(A , exist_ok=A )
lowerCAmelCase__ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
lowerCAmelCase__ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
lowerCAmelCase__ = model.to(config.torch_dtype )
torch.save(model.state_dict() , A )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model.\nThis enables sharding the converted checkpoint.''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
__UpperCAmelCase = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
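# A minimal, self-contained sketch of the tensor-parallel merge rule used in the
# conversion above: keys matching the "average" suffixes are summed and then divided
# by the TP degree, every other weight is concatenated (dim=1 for row-parallel layers,
# dim=0 for column-parallel ones). The suffix/substring lists and the toy tensors below
# are illustrative only; the real script uses the full BLOOM lists.
import torch

AVERAGE_SUFFIXES = ("input_layernorm.weight", "input_layernorm.bias")
ROW_PARALLEL_SUBSTRINGS = ("self_attention.dense", "dense_4h_to_h")


def merge_tp_shards(shards):
    merged = {key: value.clone() for key, value in shards[0].items()}
    for shard in shards[1:]:
        for key, value in shard.items():
            if any(key.endswith(suffix) for suffix in AVERAGE_SUFFIXES):
                merged[key] += value  # summed here, divided by the TP degree below
            else:
                cat_dim = 1 if any(s in key for s in ROW_PARALLEL_SUBSTRINGS) else 0
                merged[key] = torch.cat([merged[key], value], dim=cat_dim)
    for key in merged:
        if any(key.endswith(suffix) for suffix in AVERAGE_SUFFIXES):
            merged[key] /= len(shards)
    return merged


if __name__ == "__main__":
    shard_a = {
        "h.0.input_layernorm.weight": torch.ones(4),
        "h.0.self_attention.dense.weight": torch.ones(4, 2),
    }
    shard_b = {
        "h.0.input_layernorm.weight": 3 * torch.ones(4),
        "h.0.self_attention.dense.weight": torch.ones(4, 2),
    }
    merged = merge_tp_shards([shard_a, shard_b])
    print(merged["h.0.input_layernorm.weight"])             # averaged -> all 2.0
    print(merged["h.0.self_attention.dense.weight"].shape)  # concatenated -> torch.Size([4, 4])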
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_a = True
except ImportError:
_a = False
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
def _A ( UpperCamelCase_ : Namespace) -> List[Any]:
'''simple docstring'''
return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
@staticmethod
def _lowercase ( UpperCAmelCase__ : ArgumentParser ):
__lowercase = parser.add_parser("add-new-model" )
add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode." )
add_new_model_parser.add_argument("--testing_file", type=UpperCAmelCase__, help="Configuration file on which to run." )
add_new_model_parser.add_argument(
"--path", type=UpperCAmelCase__, help="Path to cookiecutter. Should only be used for testing purposes." )
add_new_model_parser.set_defaults(func=UpperCAmelCase__ )
def __init__( self : Tuple, UpperCAmelCase__ : bool, UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[Any]=None, *UpperCAmelCase__ : Union[str, Any] ):
__lowercase = testing
__lowercase = testing_file
__lowercase = path
def _lowercase ( self : Any ):
warnings.warn(
"The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
"It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
"checks, you should use `transformers-cli add-new-model-like` instead." )
if not _has_cookiecutter:
raise ImportError(
"Model creation dependencies are required to use the `add_new_model` command. Install them by running "
"the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
__lowercase = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:2_2]]
if len(UpperCAmelCase__ ) > 0:
raise ValueError(
"Several directories starting with `cookiecutter-template-` in current working directory. "
"Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
"change your working directory." )
__lowercase = (
Path(UpperCAmelCase__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
__lowercase = path_to_transformer_root / "templates" / "adding_a_new_model"
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCAmelCase__ ) )
else:
with open(self._testing_file, "r" ) as configuration_file:
__lowercase = json.load(UpperCAmelCase__ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ), no_input=UpperCAmelCase__, extra_context=UpperCAmelCase__, )
__lowercase = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:2_2]][0]
# Retrieve configuration
with open(directory + "/configuration.json", "r" ) as configuration_file:
__lowercase = json.load(UpperCAmelCase__ )
__lowercase = configuration["lowercase_modelname"]
__lowercase = configuration["generate_tensorflow_pytorch_and_flax"]
os.remove(F"""{directory}/configuration.json""" )
__lowercase = "PyTorch" in generate_tensorflow_pytorch_and_flax
__lowercase = "TensorFlow" in generate_tensorflow_pytorch_and_flax
__lowercase = "Flax" in generate_tensorflow_pytorch_and_flax
__lowercase = F"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
os.makedirs(UpperCAmelCase__, exist_ok=UpperCAmelCase__ )
os.makedirs(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""", exist_ok=UpperCAmelCase__ )
# Tests require submodules as they have parent imports
with open(F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""", "w" ):
pass
shutil.move(
F"""{directory}/__init__.py""", F"""{model_dir}/__init__.py""", )
shutil.move(
F"""{directory}/configuration_{lowercase_model_name}.py""", F"""{model_dir}/configuration_{lowercase_model_name}.py""", )
def remove_copy_lines(UpperCAmelCase__ : int ):
with open(UpperCAmelCase__, "r" ) as f:
__lowercase = f.readlines()
with open(UpperCAmelCase__, "w" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCAmelCase__ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_{lowercase_model_name}.py""", F"""{model_dir}/modeling_{lowercase_model_name}.py""", )
shutil.move(
F"""{directory}/test_modeling_{lowercase_model_name}.py""", F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""", )
else:
os.remove(F"""{directory}/modeling_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_{lowercase_model_name}.py""" )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_tf_{lowercase_model_name}.py""", F"""{model_dir}/modeling_tf_{lowercase_model_name}.py""", )
shutil.move(
F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""", F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""", )
else:
os.remove(F"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
if output_flax:
if not self._testing:
remove_copy_lines(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/modeling_flax_{lowercase_model_name}.py""", F"""{model_dir}/modeling_flax_{lowercase_model_name}.py""", )
shutil.move(
F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""", F"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""", )
else:
os.remove(F"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
os.remove(F"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
shutil.move(
F"""{directory}/{lowercase_model_name}.md""", F"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""", )
shutil.move(
F"""{directory}/tokenization_{lowercase_model_name}.py""", F"""{model_dir}/tokenization_{lowercase_model_name}.py""", )
shutil.move(
F"""{directory}/tokenization_fast_{lowercase_model_name}.py""", F"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""", )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(UpperCAmelCase__ : str, UpperCAmelCase__ : str, UpperCAmelCase__ : List[str] ):
# Create temp file
__lowercase ,__lowercase = mkstemp()
__lowercase = False
with fdopen(UpperCAmelCase__, "w" ) as new_file:
with open(UpperCAmelCase__ ) as old_file:
for line in old_file:
new_file.write(UpperCAmelCase__ )
if line_to_copy_below in line:
__lowercase = True
for line_to_copy in lines_to_copy:
new_file.write(UpperCAmelCase__ )
if not line_found:
raise ValueError(F"""Line {line_to_copy_below} was not found in file.""" )
# Copy the file permissions from the old file to the new file
copymode(UpperCAmelCase__, UpperCAmelCase__ )
# Remove original file
remove(UpperCAmelCase__ )
# Move new file
move(UpperCAmelCase__, UpperCAmelCase__ )
def skip_units(UpperCAmelCase__ : Any ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(UpperCAmelCase__ : Optional[Any] ):
with open(UpperCAmelCase__ ) as datafile:
__lowercase = []
__lowercase = False
__lowercase = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
__lowercase = line.split("\"" )[1]
__lowercase = skip_units(UpperCAmelCase__ )
elif "# Below: " in line and "##" not in line:
__lowercase = line.split("\"" )[1]
__lowercase = skip_units(UpperCAmelCase__ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = []
elif "# Replace with" in line and "##" not in line:
__lowercase = []
elif "##" not in line:
lines_to_copy.append(UpperCAmelCase__ )
remove(UpperCAmelCase__ )
replace_in_files(F"""{directory}/to_replace_{lowercase_model_name}.py""" )
os.rmdir(UpperCAmelCase__ )
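# A rough, standalone illustration of the marker format consumed by replace_in_files
# above: lines between "# Replace with" and "# End." are copied into the target file
# right below the "# Below:" anchor line. The file name, anchor and inserted line in
# this sketch are invented for demonstration; the real cookiecutter templates ship a
# to_replace_<model>.py containing many such blocks.
import os

PATCH = '''\
# To replace in: "demo_module.py"
# Below: "# MODELS END"
# Replace with:
MODELS.append("brand_new_model")
# End.
'''


def apply_patch(patch_text):
    target, anchor, lines_to_copy = None, None, []
    for line in patch_text.splitlines():
        if line.startswith("# To replace in: "):
            target = line.split('"')[1]
        elif line.startswith("# Below: "):
            anchor = line.split('"')[1]
        elif line.startswith("# End."):
            with open(target) as f:
                content = f.read()
            with open(target, "w") as f:
                f.write(content.replace(anchor, anchor + "\n" + "\n".join(lines_to_copy)))
            lines_to_copy = []
        elif line.startswith("# Replace with"):
            lines_to_copy = []
        else:
            lines_to_copy.append(line)


if __name__ == "__main__":
    with open("demo_module.py", "w") as f:
        f.write('MODELS = ["bert"]\n# MODELS END\n')
    apply_patch(PATCH)
    print(open("demo_module.py").read())
    os.remove("demo_module.py")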
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('.' ):
snake_case = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
snake_case = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
snake_case = True
if "*" in mapped_key:
snake_case = name.split(_UpperCamelCase )[0].split('.' )[-2]
snake_case = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
snake_case = 'weight_g'
elif "weight_v" in name:
snake_case = 'weight_v'
elif "weight" in name:
snake_case = 'weight'
elif "bias" in name:
snake_case = 'bias'
else:
snake_case = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
"""simple docstring"""
snake_case = full_name.split('conv_layers.' )[-1]
snake_case = name.split('.' )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
snake_case = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
snake_case = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
snake_case = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
snake_case = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def lowerCAmelCase__ ( _UpperCamelCase : str , _UpperCamelCase : Any , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Any=None , _UpperCamelCase : Union[str, Any]=True ) -> List[Any]:
"""simple docstring"""
if config_path is not None:
snake_case = HubertConfig.from_pretrained(_UpperCamelCase )
else:
snake_case = HubertConfig()
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(_UpperCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(_UpperCamelCase , 'vocab.json' )
if not os.path.isdir(_UpperCamelCase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_UpperCamelCase ) )
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , _UpperCamelCase )
snake_case = WavaVecaCTCTokenizer(
_UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_UpperCamelCase , )
snake_case = True if config.feat_extract_norm == 'layer' else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
snake_case = WavaVecaProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
snake_case = HubertForCTC(_UpperCamelCase )
else:
snake_case = HubertModel(_UpperCamelCase )
if is_finetuned:
snake_case ,snake_case ,snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
snake_case ,snake_case ,snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case = model[0].eval()
recursively_load_weights(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
hf_wavavec.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
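# A small sketch of the wildcard renaming rule applied by recursively_load_weights
# above: the "*" in a mapped key stands for the encoder layer index, recovered from
# the fairseq parameter name. The mapping entry and the example name mirror the table
# above; everything else here is illustrative.
def rename_fairseq_key(name, mapping):
    for key, mapped_key in mapping.items():
        if key in name:
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            return mapped_key
    return None


if __name__ == "__main__":
    mapping = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}
    print(rename_fairseq_key("encoder.layers.3.self_attn.k_proj.weight", mapping))
    # -> encoder.layers.3.attention.k_proj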
from ...configuration_utils import PretrainedConfig
a : Dict = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class _a ( _lowerCAmelCase ):
A = '''tapas'''
def __init__(self, SCREAMING_SNAKE_CASE_=30522, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=3072, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=1024, SCREAMING_SNAKE_CASE_=[3, 256, 256, 2, 256, 256, 10], SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_=1E-12, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=1_0.0, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=1.0, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=1.0, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=1.0, SCREAMING_SNAKE_CASE_=1.0, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_="ratio", SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
UpperCAmelCase_: Any = vocab_size
UpperCAmelCase_: Optional[Any] = hidden_size
UpperCAmelCase_: Any = num_hidden_layers
UpperCAmelCase_: Any = num_attention_heads
UpperCAmelCase_: List[str] = hidden_act
UpperCAmelCase_: Tuple = intermediate_size
UpperCAmelCase_: List[Any] = hidden_dropout_prob
UpperCAmelCase_: Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_: Dict = max_position_embeddings
UpperCAmelCase_: Union[str, Any] = type_vocab_sizes
UpperCAmelCase_: Tuple = initializer_range
UpperCAmelCase_: Tuple = layer_norm_eps
# Fine-tuning task hyperparameters
UpperCAmelCase_: Tuple = positive_label_weight
UpperCAmelCase_: Union[str, Any] = num_aggregation_labels
UpperCAmelCase_: List[str] = aggregation_loss_weight
UpperCAmelCase_: Any = use_answer_as_supervision
UpperCAmelCase_: Tuple = answer_loss_importance
UpperCAmelCase_: Union[str, Any] = use_normalized_answer_loss
UpperCAmelCase_: List[Any] = huber_loss_delta
UpperCAmelCase_: int = temperature
UpperCAmelCase_: Union[str, Any] = aggregation_temperature
UpperCAmelCase_: Any = use_gumbel_for_cells
UpperCAmelCase_: str = use_gumbel_for_aggregation
UpperCAmelCase_: Tuple = average_approximation_function
UpperCAmelCase_: int = cell_selection_preference
UpperCAmelCase_: Optional[Any] = answer_loss_cutoff
UpperCAmelCase_: Union[str, Any] = max_num_rows
UpperCAmelCase_: str = max_num_columns
UpperCAmelCase_: Any = average_logits_per_cell
UpperCAmelCase_: List[Any] = select_one_column
UpperCAmelCase_: Union[str, Any] = allow_empty_column_selection
UpperCAmelCase_: Optional[Any] = init_cell_selection_weights_to_zero
UpperCAmelCase_: List[str] = reset_position_index_per_cell
UpperCAmelCase_: Tuple = disable_per_token_loss
# Aggregation hyperparameters
UpperCAmelCase_: List[str] = aggregation_labels
UpperCAmelCase_: Any = no_aggregation_label_index
if isinstance(self.aggregation_labels, SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_: List[str] = {int(k ): v for k, v in aggregation_labels.items()}
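# A hedged usage sketch for the configuration defined above (its public name in
# transformers is TapasConfig). The argument values below are illustrative, not a
# verified fine-tuning recipe.
from transformers import TapasConfig

config = TapasConfig(
    num_aggregation_labels=4,        # e.g. NONE, SUM, AVERAGE, COUNT
    use_answer_as_supervision=True,  # weak supervision from the answer text
    select_one_column=True,
)
print(config.num_aggregation_labels, config.use_answer_as_supervision)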
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _a ( unittest.TestCase , _lowerCAmelCase ):
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: Optional[int] = load_tool("""text-classification""" )
self.tool.setup()
UpperCAmelCase_: str = load_tool("""text-classification""", remote=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = self.tool("""That's quite cool""", ["""positive""", """negative"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, """positive""" )
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: List[str] = self.remote_tool("""That's quite cool""", ["""positive""", """negative"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, """positive""" )
def __snake_case (self ) -> Any:
UpperCAmelCase_: Tuple = self.tool(text="""That's quite cool""", labels=["""positive""", """negative"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, """positive""" )
def __snake_case (self ) -> int:
UpperCAmelCase_: Dict = self.remote_tool(text="""That's quite cool""", labels=["""positive""", """negative"""] )
self.assertEqual(SCREAMING_SNAKE_CASE_, """positive""" )
"""simple docstring"""
import os
from distutils.util import strtobool
def lowercase ( __snake_case : List[Any] , __snake_case : List[str] ):
for e in env_keys:
lowercase_ : Optional[int] = int(os.environ.get(_A , -1 ) )
if val >= 0:
return val
return default
def lowercase ( __snake_case : List[Any] , __snake_case : List[str]=False ):
lowercase_ : int = os.environ.get(_A , str(_A ) )
return strtobool(_A ) == 1 # As its name indicates `strtobool` actually returns an int...
def lowercase ( __snake_case : int , __snake_case : int="no" ):
lowercase_ : List[str] = os.environ.get(_A , str(_A ) )
return value
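# A short, hedged usage sketch for the helpers above. Their counterparts in Accelerate
# are named along the lines of get_int_from_env / parse_flag_from_env /
# parse_choice_from_env; treat those names and the environment variables below as
# assumptions for illustration. Note that strtobool returns an int, hence the
# comparison with 1.
import os
from distutils.util import strtobool

os.environ["MY_APP_DEBUG"] = "yes"
debug = strtobool(os.environ.get("MY_APP_DEBUG", "no")) == 1  # -> True
timeout = int(os.environ.get("MY_APP_TIMEOUT", "-1"))         # -> -1 when unset
print(debug, timeout)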
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = StableUnCLIPImgaImgPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ = frozenset([] )
def lowercase_ ( self : int ):
a : Dict = 32
a : str = embedder_hidden_size
# image encoding components
a : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
a : Dict = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
a : Dict = StableUnCLIPImageNormalizer(embedding_dim=__snake_case )
a : Optional[int] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
a : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
a : Union[str, Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , )
torch.manual_seed(0 )
a : List[Any] = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=__snake_case , steps_offset=1 , )
torch.manual_seed(0 )
a : List[str] = AutoencoderKL()
a : str = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def lowercase_ ( self : Tuple , __snake_case : List[str] , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=True ):
if str(__snake_case ).startswith('mps' ):
a : Tuple = torch.manual_seed(__snake_case )
else:
a : List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
if pil_image:
a : Optional[Any] = input_image * 0.5 + 0.5
a : Optional[Any] = input_image.clamp(0 , 1 )
a : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
a : int = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowercase_ ( self : Optional[Any] ):
a : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a : Union[str, Any] = self.get_dummy_components()
a : Any = StableUnCLIPImgaImgPipeline(**__snake_case )
a : Tuple = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
a : Union[str, Any] = self.get_dummy_inputs(__snake_case )
inputs.update({'image_embeds': None} )
a : str = sd_pipe(**__snake_case ).images
a : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a : Optional[int] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self : List[str] ):
a : int = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=__snake_case )
def lowercase_ ( self : int ):
a : Optional[int] = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=__snake_case )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase_ ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case )
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
def lowercase_ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[Any] ):
a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
a : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
a : Optional[int] = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' )
a : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : Optional[int] ):
a : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
a : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
a : str = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' )
a : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : Any ):
a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
a : Optional[Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[int] = pipe(
__snake_case , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
a : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
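# A compact inference sketch distilled from the integration tests above; the checkpoint
# id and prompt are illustrative, and running it needs a CUDA GPU plus the pretrained
# weights (and `accelerate` installed for cpu offload).
import torch
from diffusers import StableUnCLIPImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-unclip", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
)
image = pipe(init_image, prompt="anime turtle", num_inference_steps=25).images[0]
image.save("unclip_turtle.png")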
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
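# A brief usage sketch for the symbols exported lazily above; the checkpoint id is the
# public facebook/xglm-564M model and the prompt is arbitrary. The slow tokenizer needs
# sentencepiece installed.
from transformers import XGLMForCausalLM, XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")

inputs = tokenizer("The capital of France is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))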
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
"""simple docstring"""
lowercase__ = LongformerTokenizer
lowercase__ = True
lowercase__ = LongformerTokenizerFast
lowercase__ = True
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCamelCase =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__UpperCamelCase =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
__UpperCamelCase =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__UpperCamelCase ={'''unk_token''': '''<unk>'''}
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase__ ) )
def UpperCAmelCase_ ( self : Optional[int] , **UpperCamelCase__ : str ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[str] , **UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase ='''lower newer'''
__UpperCamelCase ='''lower newer'''
return input_text, output_text
def UpperCAmelCase_ ( self : int ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase ='''lower newer'''
__UpperCamelCase =['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__UpperCamelCase =tokenizer.tokenize(UpperCamelCase__ ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokens + [tokenizer.unk_token]
__UpperCamelCase =[0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__UpperCamelCase =self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=UpperCamelCase__ ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=UpperCamelCase__ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__UpperCamelCase =tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(
'''sequence builders''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCAmelCase_ ( self : int ) -> Dict:
'''simple docstring'''
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase ='''Encode this sequence.'''
__UpperCamelCase =tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
# Testing spaces after special tokens
__UpperCamelCase ='''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )} ) # mask token has a left space
__UpperCamelCase =tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
__UpperCamelCase ='''Encode <mask> sequence'''
__UpperCamelCase ='''Encode <mask>sequence'''
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ )
__UpperCamelCase =encoded.index(UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ )
__UpperCamelCase =encoded.index(UpperCamelCase__ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( self : int ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase ='''A, <mask> AllenNLP sentence.'''
__UpperCamelCase =tokenizer_r.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
__UpperCamelCase =tokenizer_p.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCamelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCamelCase =json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , UpperCamelCase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , UpperCamelCase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , UpperCamelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> int:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__UpperCamelCase ='''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCamelCase =f"""{text_of_1_token} {text_of_1_token}"""
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ) + 1, len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCamelCase__ ), len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ) + 1, 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
UpperCamelCase__ , use_fast=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ )
__UpperCamelCase =tokenizer_r(UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCamelCase__ ), 1 + len(UpperCamelCase__ ) + 1 + len(UpperCamelCase__ )) , )
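# A minimal sketch of the offset behaviour exercised in the test above: with
# trim_offsets=True the leading byte-level space ("Ġ") is excluded from offset_mapping,
# with trim_offsets=False it is kept. The checkpoint id comes from the tests; the exact
# offsets printed depend on the vocabulary.
from transformers import LongformerTokenizerFast

text = "hello hello"
for trim_offsets in (True, False):
    tok = LongformerTokenizerFast.from_pretrained(
        "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=trim_offsets
    )
    enc = tok(text, return_offsets_mapping=True, add_special_tokens=False)
    print(trim_offsets, enc["offset_mapping"])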
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
__lowerCAmelCase : Optional[Any] = LEDConfig
__lowerCAmelCase : Any = {}
__lowerCAmelCase : Optional[Any] = 'gelu'
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=20 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=4 , ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : str = parent
UpperCAmelCase : Tuple = batch_size
UpperCAmelCase : Optional[Any] = seq_length
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : Union[str, Any] = use_labels
UpperCAmelCase : Union[str, Any] = vocab_size
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : List[str] = hidden_dropout_prob
UpperCAmelCase : List[str] = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : Optional[int] = eos_token_id
UpperCAmelCase : str = pad_token_id
UpperCAmelCase : Tuple = bos_token_id
UpperCAmelCase : Optional[Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
UpperCAmelCase : int = self.attention_window + 2
# because of padding, `encoder_seq_length` is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
UpperCAmelCase : str = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
UpperCAmelCase : Optional[int] = prepare_led_inputs_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = tf.concat(
[tf.zeros_like(_SCREAMING_SNAKE_CASE )[:, :-1], tf.ones_like(_SCREAMING_SNAKE_CASE )[:, -1:]] , axis=-1 , )
UpperCAmelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
UpperCAmelCase : Tuple = TFLEDModel(config=_SCREAMING_SNAKE_CASE ).get_decoder()
UpperCAmelCase : Optional[Any] = inputs_dict["""input_ids"""]
UpperCAmelCase : Any = input_ids[:1, :]
UpperCAmelCase : int = inputs_dict["""attention_mask"""][:1, :]
UpperCAmelCase : Optional[Any] = 1
# first forward pass
UpperCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )
UpperCAmelCase , UpperCAmelCase : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase : int = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase : str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-3 )
def _snake_case ( UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str]=None , UpperCamelCase : str=None , UpperCamelCase : Dict=None , UpperCamelCase : List[Any]=None , ):
if attention_mask is None:
UpperCAmelCase : int = tf.cast(tf.math.not_equal(UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
__lowerCAmelCase : Dict = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
__lowerCAmelCase : int = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCAmelCase : Any = True
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : Dict = False
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[int] = TFLEDModelTester(self )
UpperCAmelCase : int = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Union[str, Any] = tf.zeros_like(inputs_dict["""attention_mask"""] )
UpperCAmelCase : Tuple = 2
UpperCAmelCase : Tuple = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
UpperCAmelCase : List[Any] = True
UpperCAmelCase : int = self.model_tester.seq_length
UpperCAmelCase : Optional[int] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ):
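            # decoder self-attentions: one tensor per layer, each of shape (batch, num_heads, seq_len, seq_len)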
UpperCAmelCase : str = outputs.decoder_attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : List[Any] = [t.numpy() for t in outputs.encoder_attentions]
UpperCAmelCase : int = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : List[str] = False
UpperCAmelCase : str = False
UpperCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
UpperCAmelCase : Optional[int] = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase : List[str] = True
UpperCAmelCase : int = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : Dict = True
UpperCAmelCase : List[str] = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
pass
def _snake_case ( UpperCamelCase : Optional[Any] ):
return tf.constant(UpperCamelCase , dtype=tf.intaa )
A: Tuple = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
UpperCAmelCase : int = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase : List[Any] = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase : Union[str, Any] = prepare_led_inputs_dict(model.config , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = model(**_SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase : List[str] = (1, 1024, 768)
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
# change to expected output here
UpperCAmelCase : Dict = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-3 )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
UpperCAmelCase : Optional[Any] = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase : List[str] = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] )
UpperCAmelCase : Optional[Any] = prepare_led_inputs_dict(model.config , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = model(**_SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase : List[Any] = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
# change to expected output here
UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-3 , rtol=1E-3 )
| 109 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def _snake_case ( UpperCamelCase : int = 1000000 , UpperCamelCase : int = 10 ):
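    # For every tile total (outer_width**2 - hole_width**2) up to t_limit, count how many
    # distinct hollow square laminae use exactly that many tiles; the result is the number
    # of totals that can be laid out in between 1 and 10 different ways.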
UpperCAmelCase : defaultdict = defaultdict(UpperCamelCase )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCAmelCase : str = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCAmelCase : Optional[Any] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(UpperCamelCase , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 109 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class __magic_name__ (__lowercase ):
def __init__( self , *_a , **_a ) -> None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , _a , )
super().__init__(*_a , **_a )
| 366 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def A(__a: Dict , __a: List[str]=None ):
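    # Check a single dependency against the pinned range in dependency_versions_table,
    # passing an optional hint through to the error message.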
require_version(deps[pkg] , __a )
| 22 | 0 |
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
lowerCamelCase_ : Union[str, Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
lowerCamelCase_ : Dict = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'{len(upper_files)} files contain uppercase characters:')
print("""\n""".join(upper_files) + """\n""")
lowerCamelCase_ : Any = [file for file in filepaths if """ """ in file]
if space_files:
print(F'{len(space_files)} files contain space characters:')
print("""\n""".join(space_files) + """\n""")
lowerCamelCase_ : Dict = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(F'{len(hyphen_files)} files contain hyphen characters:')
print("""\n""".join(hyphen_files) + """\n""")
lowerCamelCase_ : Union[str, Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'{len(nodir_files)} files are not in a directory:')
print("""\n""".join(nodir_files) + """\n""")
lowerCamelCase_ : Optional[int] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
    sys.exit(bad_files)
| 81 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
SCREAMING_SNAKE_CASE__ = _symbol_database.Default()
SCREAMING_SNAKE_CASE__ = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
SCREAMING_SNAKE_CASE__ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
SCREAMING_SNAKE_CASE__ = 4_5
SCREAMING_SNAKE_CASE__ = 1_5_8_1
SCREAMING_SNAKE_CASE__ = 1_5_1_7
SCREAMING_SNAKE_CASE__ = 1_5_7_0
SCREAMING_SNAKE_CASE__ = 1_5_8_4
SCREAMING_SNAKE_CASE__ = 1_7_9_3
SCREAMING_SNAKE_CASE__ = 1_7_9_5
SCREAMING_SNAKE_CASE__ = 1_9_1_6
SCREAMING_SNAKE_CASE__ = 1_8_6_4
SCREAMING_SNAKE_CASE__ = 1_9_0_5
SCREAMING_SNAKE_CASE__ = 1_9_1_9
SCREAMING_SNAKE_CASE__ = 2_4_2_9
SCREAMING_SNAKE_CASE__ = 2_2_0_8
SCREAMING_SNAKE_CASE__ = 2_4_1_8
SCREAMING_SNAKE_CASE__ = 2_3_2_3
SCREAMING_SNAKE_CASE__ = 2_4_0_7
# @@protoc_insertion_point(module_scope)
| 321 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : Optional[int] = []
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(__UpperCAmelCase ) )
elif isinstance(__UpperCAmelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(__UpperCAmelCase ) )
elif isinstance(__UpperCAmelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
@torch.jit.ignore
def SCREAMING_SNAKE_CASE__ ( __a , __a ):
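    # Convert a flat (ravelled) index back into a per-dimension index tuple for the given dims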
snake_case_ : str = []
for d in reversed(__UpperCAmelCase ):
idx.append(flat_idx % d )
snake_case_ : Optional[Any] = flat_idx // d
return tuple(reversed(__UpperCAmelCase ) )
@torch.jit.ignore
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a = None , __a = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(__a ) -> None:
snake_case_ : Tuple = True
for i in range(len(__UpperCAmelCase ) ):
snake_case_ : Optional[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
snake_case_ : Dict = l[reversed_idx]
if start_edges is None:
snake_case_ : Optional[int] = [s == 0 for s in start]
reduce_edge_list(__UpperCAmelCase )
if end_edges is None:
snake_case_ : Optional[int] = [e == (d - 1) for e, d in zip(__UpperCAmelCase , __UpperCAmelCase )]
reduce_edge_list(__UpperCAmelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(__UpperCAmelCase ) == 0:
return [()]
elif len(__UpperCAmelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
snake_case_ : List[Tuple[slice, ...]] = []
snake_case_ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(__UpperCAmelCase , __UpperCAmelCase ):
if s == e:
path_list.append(slice(__UpperCAmelCase , s + 1 ) )
else:
break
snake_case_ : Tuple[slice, ...] = tuple(__UpperCAmelCase )
snake_case_ : int = len(__UpperCAmelCase )
# start == end, and we're done
if divergence_idx == len(__UpperCAmelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
snake_case_ : int = start[divergence_idx]
return tuple(
path + (slice(__UpperCAmelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
snake_case_ : Dict = end[divergence_idx]
return tuple(
path + (slice(__UpperCAmelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
snake_case_ : Optional[int] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a ):
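    # Extract a contiguous flat range of batch elements from `t`, treating its leading
    # `no_batch_dims` dimensions as a single flattened batch dimension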
snake_case_ : List[Any] = t.shape[:no_batch_dims]
snake_case_ : Optional[Any] = list(_flat_idx_to_idx(__UpperCAmelCase , __UpperCAmelCase ) )
# _get_minimal_slice_set is inclusive
snake_case_ : Dict = list(_flat_idx_to_idx(flat_end - 1 , __UpperCAmelCase ) )
# Get an ordered list of slices to perform
snake_case_ : int = _get_minimal_slice_set(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
snake_case_ : Tuple = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a = False , __a = None , __a = False , ):
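    # Run `layer` over the inputs in chunks of `chunk_size` along the flattened batch
    # dimensions and stitch the per-chunk outputs back together, trading compute for
    # a lower peak memory footprint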
if not (len(__UpperCAmelCase ) > 0):
raise ValueError('Must provide at least one input' )
snake_case_ : List[Any] = [shape[:no_batch_dims] for shape in _fetch_dims(__UpperCAmelCase )]
snake_case_ : int = tuple([max(__UpperCAmelCase ) for s in zip(*__UpperCAmelCase )] )
def _prep_inputs(__a ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
snake_case_ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
snake_case_ : List[str] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
snake_case_ : Union[str, Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
snake_case_ : Dict[str, Any] = tensor_tree_map(_prep_inputs , __UpperCAmelCase )
snake_case_ : int = None
if _out is not None:
snake_case_ : str = tensor_tree_map(lambda __a : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
snake_case_ : Optional[Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
snake_case_ : Any = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(__a ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
snake_case_ : Optional[Any] = 0
snake_case_ : int = prepped_outputs
for _ in range(__UpperCAmelCase ):
# Chunk the input
if not low_mem:
snake_case_ : int = _select_chunk
else:
snake_case_ : Optional[Any] = partial(
_chunk_slice , flat_start=__UpperCAmelCase , flat_end=min(__UpperCAmelCase , i + chunk_size ) , no_batch_dims=len(__UpperCAmelCase ) , )
snake_case_ : Dict[str, Any] = tensor_tree_map(__UpperCAmelCase , __UpperCAmelCase )
# Run the layer on the chunk
snake_case_ : Optional[Any] = layer(**__UpperCAmelCase )
# Allocate space for the output
if out is None:
snake_case_ : Any = tensor_tree_map(lambda __a : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , __UpperCAmelCase )
# Put the chunk in its pre-allocated space
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
def assign(__a , __a ) -> None:
for k, v in da.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
assign(__UpperCAmelCase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
snake_case_ : Optional[int] = da[k]
assign(__UpperCAmelCase , __UpperCAmelCase )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
for xa, xa in zip(__UpperCAmelCase , __UpperCAmelCase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
snake_case_ : Optional[int] = xa
elif isinstance(__UpperCAmelCase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
snake_case_ : Union[str, Any] = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
snake_case_ : Any = tensor_tree_map(lambda __a : t.view(orig_batch_dims + t.shape[1:] ) , __UpperCAmelCase )
return out
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Any , _A : List[str] = 512 , ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : int = max_chunk_size
snake_case_ : Optional[int] = None
snake_case_ : Optional[tuple] = None
def UpperCAmelCase_ ( self : Union[str, Any] , _A : Optional[Any] , _A : List[str] , _A : Optional[Any] ) -> Any:
"""simple docstring"""
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
snake_case_ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
snake_case_ : List[Any] = [c for c in candidates if c > min_chunk_size]
snake_case_ : str = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(_A : Any ) -> bool:
try:
with torch.no_grad():
fn(*_UpperCAmelCase , chunk_size=_UpperCAmelCase )
return True
except RuntimeError:
return False
snake_case_ : Any = 0
snake_case_ : Any = len(_UpperCAmelCase ) - 1
while i > min_viable_chunk_size_index:
snake_case_ : List[str] = test_chunk_size(candidates[i] )
if not viable:
snake_case_ : Optional[int] = (min_viable_chunk_size_index + i) // 2
else:
snake_case_ : Dict = i
snake_case_ : Union[str, Any] = (i + len(_UpperCAmelCase ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def UpperCAmelCase_ ( self : Any , _A : List[str] , _A : List[str] ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = True
for aa, aa in zip(_UpperCAmelCase , _UpperCAmelCase ):
assert type(_UpperCAmelCase ) == type(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , (list, tuple) ):
consistent &= self._compare_arg_caches(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
snake_case_ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda _A : x[0] )]
snake_case_ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda _A : x[0] )]
consistent &= self._compare_arg_caches(_UpperCAmelCase , _UpperCAmelCase )
else:
consistent &= aa == aa
return consistent
def UpperCAmelCase_ ( self : Union[str, Any] , _A : Dict , _A : Any , _A : Optional[Any] , ) -> Any:
"""simple docstring"""
snake_case_ : str = True
snake_case_ : tuple = tree_map(lambda _A : a.shape if isinstance(_UpperCAmelCase , torch.Tensor ) else a , _UpperCAmelCase , _UpperCAmelCase )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(_UpperCAmelCase )
snake_case_ : str = self._compare_arg_caches(self.cached_arg_data , _UpperCAmelCase )
else:
# Otherwise, we can reuse the precomputed value
snake_case_ : Optional[int] = False
if not consistent:
snake_case_ : List[Any] = self._determine_favorable_chunk_size(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
snake_case_ : str = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 352 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_SCREAMING_SNAKE_CASE = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 | 0 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__UpperCAmelCase = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__UpperCAmelCase = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__UpperCAmelCase = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase (datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=4 , _UpperCamelCase=False ) -> Union[str, Any]:
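        # Delegate to the bundled TensorFlow-NMT reference implementation of corpus-level BLEU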
UpperCAmelCase_ : List[str] = compute_bleu(
reference_corpus=_UpperCamelCase , translation_corpus=_UpperCamelCase , max_order=_UpperCamelCase , smooth=_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 29 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : Optional[Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[str] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 85 | 0 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__A : Tuple = Vector()
def UpperCamelCase__( self ):
'''simple docstring'''
__A : int = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__lowerCamelCase ) , '''(0,0,0,0,0,1)''' )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : int = Vector([1, 2, 3, 4] )
self.assertEqual(len(__lowerCamelCase ) , 4 )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Union[str, Any] = Vector([1, 2] )
__A : List[str] = Vector([1, 2, 3, 4, 5] )
__A : List[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__A : Union[str, Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[int] = Vector([1, 2, 3] )
__A : Tuple = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : int = Vector([1, 2, 3] )
__A : str = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[str] = Vector([1, 2, 3] )
__A : List[str] = Vector([2, -1, 4] ) # for test of dot product
__A : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
self.assertEqual((a * b) , 0 )
def UpperCamelCase__( self ):
'''simple docstring'''
self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )
def UpperCamelCase__( self ):
'''simple docstring'''
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : str = Vector([1, 2, 3] )
__A : Union[str, Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __lowerCamelCase , __lowerCamelCase ) ) , '''(3,4,7)''' )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : int = Vector([1, 0, 0, 0, 0, 0] )
__A : List[Any] = x.copy()
self.assertEqual(str(__lowerCamelCase ) , str(__lowerCamelCase ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__lowerCamelCase ) , '''(0,1,0)''' )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(__lowerCamelCase ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__A : int = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__lowerCamelCase , __lowerCamelCase ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__A : Optional[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__lowerCamelCase , __lowerCamelCase ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[Any] = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__A : Dict = Vector([1, 2, 3] )
self.assertEqual('''(14,32,50)''' , str(a * x ) )
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(__lowerCamelCase ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__A : str = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )
def UpperCamelCase__( self ):
'''simple docstring'''
__A : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__A : str = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )
def UpperCamelCase__( self ):
'''simple docstring'''
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 361 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ = logging.get_logger(__name__)
a_ = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = """dinat"""
_lowerCamelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , __lowerCamelCase=4 , __lowerCamelCase=3 , __lowerCamelCase=64 , __lowerCamelCase=[3, 4, 6, 5] , __lowerCamelCase=[2, 4, 8, 16] , __lowerCamelCase=7 , __lowerCamelCase=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , __lowerCamelCase=3.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=0.0 , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
__A : Dict = patch_size
__A : Union[str, Any] = num_channels
__A : str = embed_dim
__A : Optional[Any] = depths
__A : int = len(__lowerCamelCase )
__A : Union[str, Any] = num_heads
__A : Tuple = kernel_size
__A : Optional[int] = dilations
__A : Tuple = mlp_ratio
__A : Optional[int] = qkv_bias
__A : int = hidden_dropout_prob
__A : Dict = attention_probs_dropout_prob
__A : int = drop_path_rate
__A : Dict = hidden_act
__A : Any = layer_norm_eps
__A : Tuple = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__A : str = int(embed_dim * 2 ** (len(__lowerCamelCase ) - 1) )
__A : List[Any] = layer_scale_init_value
__A : Any = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(__lowerCamelCase ) + 1 )]
__A , __A : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
| 291 | 0 |
import argparse
from collections import defaultdict
import yaml
A__ = """docs/source/en/_toctree.yml"""
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = defaultdict(snake_case )
_lowerCAmelCase = []
_lowerCAmelCase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(snake_case )
_lowerCAmelCase = new_doc_list
_lowerCAmelCase = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase = []
for duplicate_key in duplicates:
_lowerCAmelCase = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(snake_case ) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
    # Add the keys that are not duplicated
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
_lowerCAmelCase = sorted(snake_case , key=lambda snake_case : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(snake_case ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(snake_case )
# Sort
return overview_doc
def _UpperCAmelCase ( snake_case=False ):
"""simple docstring"""
with open(snake_case , encoding="""utf-8""" ) as f:
_lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase = content[api_idx]["""sections"""]
# Then to the model doc
_lowerCAmelCase = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowerCAmelCase = api_doc[scheduler_idx]["""sections"""]
_lowerCAmelCase = clean_doc_toc(snake_case )
_lowerCAmelCase = False
if new_scheduler_doc != scheduler_doc:
_lowerCAmelCase = True
if overwrite:
_lowerCAmelCase = new_scheduler_doc
if diff:
if overwrite:
_lowerCAmelCase = api_doc
with open(snake_case , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(snake_case , allow_unicode=snake_case ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def _UpperCAmelCase ( snake_case=False ):
"""simple docstring"""
with open(snake_case , encoding="""utf-8""" ) as f:
_lowerCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase = content[api_idx]["""sections"""]
# Then to the model doc
_lowerCAmelCase = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_lowerCAmelCase = False
_lowerCAmelCase = api_doc[pipeline_idx]["""sections"""]
_lowerCAmelCase = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_lowerCAmelCase = pipeline_doc["""section"""]
_lowerCAmelCase = clean_doc_toc(snake_case )
if overwrite:
_lowerCAmelCase = new_sub_pipeline_doc
new_pipeline_docs.append(snake_case )
# sort overall pipeline doc
_lowerCAmelCase = clean_doc_toc(snake_case )
if new_pipeline_docs != pipeline_docs:
_lowerCAmelCase = True
if overwrite:
_lowerCAmelCase = new_pipeline_docs
if diff:
if overwrite:
_lowerCAmelCase = api_doc
with open(snake_case , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(snake_case , allow_unicode=snake_case ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
A__ = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 82 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 82 | 1 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def A (__lowerCamelCase :Any , __lowerCamelCase :str ):
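    # Remap an LDM/Stable Diffusion VAE state dict onto the parameter layout used by
    # diffusers' AutoencoderKL (conv in/out, down/up blocks, mid-block resnets and attention)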
_lowerCAmelCase = checkpoint
_lowerCAmelCase = {}
_lowerCAmelCase = vae_state_dict["""encoder.conv_in.weight"""]
_lowerCAmelCase = vae_state_dict["""encoder.conv_in.bias"""]
_lowerCAmelCase = vae_state_dict["""encoder.conv_out.weight"""]
_lowerCAmelCase = vae_state_dict["""encoder.conv_out.bias"""]
_lowerCAmelCase = vae_state_dict["""encoder.norm_out.weight"""]
_lowerCAmelCase = vae_state_dict["""encoder.norm_out.bias"""]
_lowerCAmelCase = vae_state_dict["""decoder.conv_in.weight"""]
_lowerCAmelCase = vae_state_dict["""decoder.conv_in.bias"""]
_lowerCAmelCase = vae_state_dict["""decoder.conv_out.weight"""]
_lowerCAmelCase = vae_state_dict["""decoder.conv_out.bias"""]
_lowerCAmelCase = vae_state_dict["""decoder.norm_out.weight"""]
_lowerCAmelCase = vae_state_dict["""decoder.norm_out.bias"""]
_lowerCAmelCase = vae_state_dict["""quant_conv.weight"""]
_lowerCAmelCase = vae_state_dict["""quant_conv.bias"""]
_lowerCAmelCase = vae_state_dict["""post_quant_conv.weight"""]
_lowerCAmelCase = vae_state_dict["""post_quant_conv.bias"""]
# Retrieves the keys for the encoder down blocks only
_lowerCAmelCase = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} )
_lowerCAmelCase = {
layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(lowercase_ )
}
# Retrieves the keys for the decoder up blocks only
_lowerCAmelCase = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} )
_lowerCAmelCase = {
layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(lowercase_ )
}
for i in range(lowercase_ ):
_lowerCAmelCase = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key]
if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
_lowerCAmelCase = vae_state_dict.pop(
f'encoder.down.{i}.downsample.conv.weight' )
_lowerCAmelCase = vae_state_dict.pop(
f'encoder.down.{i}.downsample.conv.bias' )
_lowerCAmelCase = renew_vae_resnet_paths(lowercase_ )
_lowerCAmelCase = {"""old""": f'down.{i}.block', """new""": f'down_blocks.{i}.resnets'}
assign_to_checkpoint(lowercase_ , lowercase_ , lowercase_ , additional_replacements=[meta_path] , config=lowercase_ )
_lowerCAmelCase = [key for key in vae_state_dict if """encoder.mid.block""" in key]
_lowerCAmelCase = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_lowerCAmelCase = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key]
_lowerCAmelCase = renew_vae_resnet_paths(lowercase_ )
_lowerCAmelCase = {"""old""": f'mid.block_{i}', """new""": f'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(lowercase_ , lowercase_ , lowercase_ , additional_replacements=[meta_path] , config=lowercase_ )
_lowerCAmelCase = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
_lowerCAmelCase = renew_vae_attention_paths(lowercase_ )
_lowerCAmelCase = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(lowercase_ , lowercase_ , lowercase_ , additional_replacements=[meta_path] , config=lowercase_ )
conv_attn_to_linear(lowercase_ )
for i in range(lowercase_ ):
_lowerCAmelCase = num_up_blocks - 1 - i
_lowerCAmelCase = [
key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key
]
if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
_lowerCAmelCase = vae_state_dict[
f'decoder.up.{block_id}.upsample.conv.weight'
]
_lowerCAmelCase = vae_state_dict[
f'decoder.up.{block_id}.upsample.conv.bias'
]
_lowerCAmelCase = renew_vae_resnet_paths(lowercase_ )
_lowerCAmelCase = {"""old""": f'up.{block_id}.block', """new""": f'up_blocks.{i}.resnets'}
assign_to_checkpoint(lowercase_ , lowercase_ , lowercase_ , additional_replacements=[meta_path] , config=lowercase_ )
_lowerCAmelCase = [key for key in vae_state_dict if """decoder.mid.block""" in key]
_lowerCAmelCase = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_lowerCAmelCase = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key]
_lowerCAmelCase = renew_vae_resnet_paths(lowercase_ )
_lowerCAmelCase = {"""old""": f'mid.block_{i}', """new""": f'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(lowercase_ , lowercase_ , lowercase_ , additional_replacements=[meta_path] , config=lowercase_ )
_lowerCAmelCase = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
_lowerCAmelCase = renew_vae_attention_paths(lowercase_ )
_lowerCAmelCase = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(lowercase_ , lowercase_ , lowercase_ , additional_replacements=[meta_path] , config=lowercase_ )
conv_attn_to_linear(lowercase_ )
return new_checkpoint
def A (__lowerCamelCase :str , __lowerCamelCase :str , ):
# Only support V1
_lowerCAmelCase = requests.get(
""" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" )
_lowerCAmelCase = io.BytesIO(r.content )
_lowerCAmelCase = OmegaConf.load(lowercase_ )
_lowerCAmelCase = 512
_lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
if checkpoint_path.endswith("""safetensors""" ):
from safetensors import safe_open
_lowerCAmelCase = {}
with safe_open(lowercase_ , framework="""pt""" , device="""cpu""" ) as f:
for key in f.keys():
_lowerCAmelCase = f.get_tensor(lowercase_ )
else:
_lowerCAmelCase = torch.load(lowercase_ , map_location=lowercase_ )["""state_dict"""]
# Convert the VAE model.
_lowerCAmelCase = create_vae_diffusers_config(lowercase_ , image_size=lowercase_ )
_lowerCAmelCase = custom_convert_ldm_vae_checkpoint(lowercase_ , lowercase_ )
_lowerCAmelCase = AutoencoderKL(**lowercase_ )
vae.load_state_dict(lowercase_ )
vae.save_pretrained(lowercase_ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
_lowercase = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 357 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
_lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_lowercase = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_lowercase = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue

        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 229 | 0 |
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Checks whether next_ver can be appended to the path at position curr_ind."""
    # 1. Validate that current and next vertices are connected in the graph
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Recursive helper that tries to extend the partial path into a Hamiltonian cycle."""
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Returns a Hamiltonian cycle starting at start_index, or an empty list if none exists."""
    # Initialize path with -1, indicating that the vertices have not been visited yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
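# Illustrative usage (not part of the original module): a 5-vertex graph that contains
# the Hamiltonian cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0.
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # expected: [0, 1, 2, 4, 3, 0]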
| 150 | """simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : str=False , _UpperCamelCase : str=False ) -> Optional[Any]:
"""simple docstring"""
snake_case = 'backbone.' if is_semantic else ''
snake_case = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", 'beit.embeddings.cls_token'),
(f"""{prefix}patch_embed.proj.weight""", 'beit.embeddings.patch_embeddings.projection.weight'),
(f"""{prefix}patch_embed.proj.bias""", 'beit.embeddings.patch_embeddings.projection.bias'),
(f"""{prefix}pos_embed""", 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Tuple=False , _UpperCamelCase : Union[str, Any]=False ) -> str:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
snake_case = 'backbone.' if is_semantic else ''
# queries, keys and values
snake_case = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
snake_case = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
snake_case = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
snake_case = in_proj_weight[
: config.hidden_size, :
]
snake_case = q_bias
snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case = in_proj_weight[
-config.hidden_size :, :
]
snake_case = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
snake_case = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
snake_case = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
snake_case = gamma_a
snake_case = gamma_a
def lowerCAmelCase__ ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict ) -> Dict:
"""simple docstring"""
snake_case = dct.pop(_UpperCamelCase )
snake_case = val
def lowerCAmelCase__ ( ) -> int:
"""simple docstring"""
snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any]=False ) -> Union[str, Any]:
"""simple docstring"""
snake_case = False if 'rvlcdip' in checkpoint_url else True
snake_case = BeitConfig(use_absolute_position_embeddings=_UpperCamelCase , use_mask_token=_UpperCamelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
snake_case = 1_0_2_4
snake_case = 4_0_9_6
snake_case = 2_4
snake_case = 1_6
# labels
if "rvlcdip" in checkpoint_url:
snake_case = 1_6
snake_case = 'huggingface/label-files'
snake_case = 'rvlcdip-id2label.json'
snake_case = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) , 'r' ) )
snake_case = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
snake_case = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location='cpu' )['model']
snake_case = create_rename_keys(_UpperCamelCase , has_lm_head=_UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase , has_lm_head=_UpperCamelCase )
# load HuggingFace model
snake_case = BeitForMaskedImageModeling(_UpperCamelCase ) if has_lm_head else BeitForImageClassification(_UpperCamelCase )
model.eval()
model.load_state_dict(_UpperCamelCase )
# Check outputs on an image
snake_case = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_UpperCamelCase )
snake_case = prepare_img()
snake_case = image_processor(images=_UpperCamelCase , return_tensors='pt' )
snake_case = encoding['pixel_values']
snake_case = model(_UpperCamelCase )
snake_case = outputs.logits
# verify logits
snake_case = [1, 1_6] if 'rvlcdip' in checkpoint_url else [1, 1_9_6, 8_1_9_2]
assert logits.shape == torch.Size(_UpperCamelCase ), "Shape of logits not as expected"
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
if has_lm_head:
snake_case = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
snake_case = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(_UpperCamelCase , _UpperCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_UpperCamelCase , )
model.push_to_hub(
repo_path_or_name=Path(_UpperCamelCase , _UpperCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_UpperCamelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 150 | 1 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : int = old_name
if "patch_embed" in old_name:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = old_name.split("." )
if layer == "0":
UpperCAmelCase_ : List[str] = old_name.replace("0" ,"convolution1" )
elif layer == "1":
UpperCAmelCase_ : Any = old_name.replace("1" ,"batchnorm_before" )
elif layer == "3":
UpperCAmelCase_ : int = old_name.replace("3" ,"convolution2" )
else:
UpperCAmelCase_ : int = old_name.replace("4" ,"batchnorm_after" )
if "network" in old_name and re.search(r"\d\.\d" ,A__ ):
UpperCAmelCase_ : Tuple = r"\b\d{2}\b"
if bool(re.search(A__ ,A__ ) ):
UpperCAmelCase_ : Any = re.search(r"\d\.\d\d." ,A__ ).group()
else:
UpperCAmelCase_ : Any = re.search(r"\d\.\d." ,A__ ).group()
if int(match[0] ) < 6:
UpperCAmelCase_ : Optional[Any] = old_name.replace(A__ ,"" )
UpperCAmelCase_ : Optional[int] = trimmed_name.replace("network" ,match[0] + ".meta4D_layers.blocks." + match[2:-1] )
UpperCAmelCase_ : Dict = "intermediate_stages." + trimmed_name
else:
UpperCAmelCase_ : Optional[Any] = old_name.replace(A__ ,"" )
if int(match[2] ) < num_meta4D_last_stage:
UpperCAmelCase_ : List[str] = trimmed_name.replace("network" ,"meta4D_layers.blocks." + match[2] )
else:
UpperCAmelCase_ : Dict = str(int(match[2] ) - num_meta4D_last_stage )
UpperCAmelCase_ : List[str] = trimmed_name.replace("network" ,"meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
UpperCAmelCase_ : List[Any] = trimmed_name.replace("norm1" ,"layernorm1" )
elif "norm2" in old_name:
UpperCAmelCase_ : Dict = trimmed_name.replace("norm2" ,"layernorm2" )
elif "fc1" in old_name:
UpperCAmelCase_ : Optional[Any] = trimmed_name.replace("fc1" ,"linear_in" )
elif "fc2" in old_name:
UpperCAmelCase_ : str = trimmed_name.replace("fc2" ,"linear_out" )
UpperCAmelCase_ : List[str] = "last_stage." + trimmed_name
elif "network" in old_name and re.search(r".\d." ,A__ ):
UpperCAmelCase_ : Optional[int] = old_name.replace("network" ,"intermediate_stages" )
if "fc" in new_name:
UpperCAmelCase_ : Optional[int] = new_name.replace("fc" ,"convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
UpperCAmelCase_ : Optional[Any] = new_name.replace("norm1" ,"batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
UpperCAmelCase_ : List[str] = new_name.replace("norm2" ,"batchnorm_after" )
if "proj" in new_name:
UpperCAmelCase_ : List[Any] = new_name.replace("proj" ,"projection" )
if "dist_head" in new_name:
UpperCAmelCase_ : Union[str, Any] = new_name.replace("dist_head" ,"distillation_classifier" )
elif "head" in new_name:
UpperCAmelCase_ : Tuple = new_name.replace("head" ,"classifier" )
elif "patch_embed" in new_name:
UpperCAmelCase_ : Any = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
UpperCAmelCase_ : List[str] = new_name.replace("norm" ,"layernorm" )
UpperCAmelCase_ : str = "efficientformer." + new_name
else:
UpperCAmelCase_ : Union[str, Any] = "efficientformer.encoder." + new_name
return new_name
def snake_case ( A__ ,A__ ):
for key in checkpoint.copy().keys():
UpperCAmelCase_ : Optional[int] = checkpoint.pop(A__ )
UpperCAmelCase_ : Dict = val
return checkpoint
def snake_case ( ):
UpperCAmelCase_ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : Any = Image.open(requests.get(A__ ,stream=A__ ).raw )
return image
def snake_case ( A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[Any] = torch.load(A__ ,map_location="cpu" )["model"]
UpperCAmelCase_ : Optional[Any] = EfficientFormerConfig.from_json_file(A__ )
UpperCAmelCase_ : Optional[Any] = EfficientFormerForImageClassificationWithTeacher(A__ )
UpperCAmelCase_ : List[Any] = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
UpperCAmelCase_ : Dict = config.depths[-1] - config.num_metaad_blocks + 1
UpperCAmelCase_ : Union[str, Any] = convert_torch_checkpoint(A__ ,A__ )
model.load_state_dict(A__ )
model.eval()
UpperCAmelCase_ : Optional[int] = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
UpperCAmelCase_ : Union[str, Any] = prepare_img()
UpperCAmelCase_ : int = 2_56
UpperCAmelCase_ : int = 2_24
UpperCAmelCase_ : Any = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} ,crop_size={"height": crop_size, "width": crop_size} ,resample=pillow_resamplings["bicubic"] ,)
UpperCAmelCase_ : List[str] = processor(images=A__ ,return_tensors="pt" ).pixel_values
# original processing pipeline
UpperCAmelCase_ : int = Compose(
[
Resize(A__ ,interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(A__ ),
ToTensor(),
Normalize(A__ ,A__ ),
] )
UpperCAmelCase_ : List[Any] = image_transforms(A__ ).unsqueeze(0 )
assert torch.allclose(A__ ,A__ )
UpperCAmelCase_ : Dict = model(A__ )
UpperCAmelCase_ : Any = outputs.logits
UpperCAmelCase_ : int = (1, 10_00)
if "l1" in model_name:
UpperCAmelCase_ : int = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] ,A__ ,atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
UpperCAmelCase_ : str = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] ,A__ ,atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
UpperCAmelCase_ : int = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(A__ )
print(F"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" ,commit_message="Add model" ,use_temp_dir=A__ ,)
processor.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" ,commit_message="Add image processor" ,use_temp_dir=A__ ,)
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
lowerCamelCase_ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 253 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
lowerCamelCase_ = {'''bert_for_seq_generation''': 512}
class UpperCamelCase_ (__A ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = []
__magic_name__ = ['''input_ids''', '''attention_mask''']
def __init__( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : int="<unk>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Tuple="<::::>" , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : Union[str, Any] , ) -> None:
UpperCAmelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
UpperCAmelCase_ : List[str] = vocab_file
UpperCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
UpperCAmelCase_ : List[str] = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> Tuple:
UpperCAmelCase_ : List[str] = self.__dict__.copy()
UpperCAmelCase_ : List[Any] = None
return state
def __setstate__( self : Dict , lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
UpperCAmelCase_ : Any = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ : Any = {}
UpperCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : str ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Dict:
return self.sp_model.piece_to_id(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : int ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.sp_model.IdToPiece(lowerCAmelCase_ )
return token
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Tuple = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
UpperCAmelCase_ : Tuple = []
else:
current_sub_tokens.append(lowerCAmelCase_ )
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : Tuple = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , "wb" ) as fi:
UpperCAmelCase_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
| 253 | 1 |
"""simple docstring"""
def pancake_sort(arr: list) -> list:
    """Sorts a list by repeatedly flipping prefixes (pancake sort) and returns the sorted list."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
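# Quick example (illustrative, not in the original file): pancake_sort([3, 1, 5, 2]) returns [1, 2, 3, 5].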
| 260 |
"""simple docstring"""
def solution() -> int:
    """Counts the Sundays that fell on the first of the month during the twentieth
    century (1 Jan 1901 to 31 Dec 2000), per Project Euler problem 19."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901

    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
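# For reference, the published answer to Project Euler problem 19 is 171, which is what solution() should return.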
| 260 | 1 |
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
UpperCAmelCase: Tuple = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , ):
output_path.parent.mkdir(parents=UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
UpperCAmelCase__ , UpperCAmelCase__ , f=output_path.as_posix() , input_names=UpperCAmelCase__ , output_names=UpperCAmelCase__ , dynamic_axes=UpperCAmelCase__ , do_constant_folding=UpperCAmelCase__ , use_external_data_format=UpperCAmelCase__ , enable_onnx_checker=UpperCAmelCase__ , opset_version=UpperCAmelCase__ , )
else:
export(
UpperCAmelCase__ , UpperCAmelCase__ , f=output_path.as_posix() , input_names=UpperCAmelCase__ , output_names=UpperCAmelCase__ , dynamic_axes=UpperCAmelCase__ , do_constant_folding=UpperCAmelCase__ , opset_version=UpperCAmelCase__ , )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ):
_lowercase : List[Any] = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
_lowercase : Optional[Any] = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
_lowercase : Any = """cpu"""
_lowercase : List[Any] = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=UpperCAmelCase__ ).to(UpperCAmelCase__ )
_lowercase : Any = Path(UpperCAmelCase__ )
# TEXT ENCODER
_lowercase : int = pipeline.text_encoder.config.max_position_embeddings
_lowercase : str = pipeline.text_encoder.config.hidden_size
_lowercase : List[str] = pipeline.tokenizer(
"""A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=UpperCAmelCase__ , return_tensors="""pt""" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=UpperCAmelCase__ , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """sequence"""},
} , opset=UpperCAmelCase__ , )
del pipeline.text_encoder
# UNET
_lowercase : Dict = pipeline.unet.config.in_channels
_lowercase : Union[str, Any] = pipeline.unet.config.sample_size
_lowercase : Optional[int] = output_path / """unet""" / """model.onnx"""
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
torch.randn(2 ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
torch.randn(2 , UpperCAmelCase__ , UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
False,
) , output_path=UpperCAmelCase__ , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""timestep""": {0: """batch"""},
"""encoder_hidden_states""": {0: """batch""", 1: """sequence"""},
} , opset=UpperCAmelCase__ , use_external_data_format=UpperCAmelCase__ , )
_lowercase : Any = str(unet_path.absolute().as_posix() )
_lowercase : List[str] = os.path.dirname(UpperCAmelCase__ )
_lowercase : List[Any] = onnx.load(UpperCAmelCase__ )
# clean up existing tensor files
shutil.rmtree(UpperCAmelCase__ )
os.mkdir(UpperCAmelCase__ )
# collate external tensor files into one
onnx.save_model(
UpperCAmelCase__ , UpperCAmelCase__ , save_as_external_data=UpperCAmelCase__ , all_tensors_to_one_file=UpperCAmelCase__ , location="""weights.pb""" , convert_attribute=UpperCAmelCase__ , )
del pipeline.unet
# VAE ENCODER
_lowercase : Optional[Any] = pipeline.vae
_lowercase : Any = vae_encoder.config.in_channels
_lowercase : Tuple = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
_lowercase : int = lambda __UpperCAmelCase , __UpperCAmelCase : vae_encoder.encode(UpperCAmelCase__ , UpperCAmelCase__ )[0].sample()
onnx_export(
UpperCAmelCase__ , model_args=(
torch.randn(1 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
False,
) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=UpperCAmelCase__ , )
# VAE DECODER
_lowercase : List[Any] = pipeline.vae
_lowercase : Optional[int] = vae_decoder.config.latent_channels
_lowercase : Optional[int] = vae_decoder.config.out_channels
# forward only through the decoder part
_lowercase : List[str] = vae_encoder.decode
onnx_export(
UpperCAmelCase__ , model_args=(
torch.randn(1 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=UpperCAmelCase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
_lowercase : Dict = pipeline.safety_checker
_lowercase : Tuple = safety_checker.config.vision_config.num_channels
_lowercase : Union[str, Any] = safety_checker.config.vision_config.image_size
_lowercase : str = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
torch.randn(1 , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ ),
) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={
"""clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""},
} , opset=UpperCAmelCase__ , )
del pipeline.safety_checker
_lowercase : Optional[Any] = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" )
_lowercase : str = pipeline.feature_extractor
else:
_lowercase : int = None
_lowercase : int = None
_lowercase : List[str] = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(UpperCAmelCase__ )
print("""ONNX pipeline saved to""" , UpperCAmelCase__ )
del pipeline
del onnx_pipeline
_lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(UpperCAmelCase__ , provider="""CPUExecutionProvider""" )
print("""ONNX pipeline is loadable""" )
if __name__ == "__main__":
UpperCAmelCase: Any = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
UpperCAmelCase: Dict = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 363 |
"""simple docstring"""
UpperCAmelCase: str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase: Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase: int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 336 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( A , unittest.TestCase ):
lowerCAmelCase_ = LxmertTokenizer
lowerCAmelCase_ = LxmertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
__lowercase =[
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def snake_case ( self : List[Any] , __lowercase : List[Any] ):
"""simple docstring"""
__lowercase ='UNwant\u00E9d,running'
__lowercase ='unwanted, running'
return input_text, output_text
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
__lowercase =self.tokenizer_class(self.vocab_file )
__lowercase =tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(__lowercase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , [7, 4, 5, 10, 8, 9] )
def snake_case ( self : int ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase =self.get_tokenizer()
__lowercase =self.get_rust_tokenizer()
__lowercase ='I was born in 92000, and this is falsé.'
__lowercase =tokenizer.tokenize(__lowercase )
__lowercase =rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__lowercase =tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
__lowercase =rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__lowercase =self.get_rust_tokenizer()
__lowercase =tokenizer.encode(__lowercase )
__lowercase =rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
| 141 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase ( A , A , unittest.TestCase ):
lowerCAmelCase_ = IFImgaImgSuperResolutionPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case ( self : int ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def snake_case ( self : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : List[str]=0 ):
"""simple docstring"""
if str(__lowercase ).startswith('mps' ):
__lowercase =torch.manual_seed(__lowercase )
else:
__lowercase =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowercase =floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowercase =floats_tensor((1, 3, 16, 16) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowercase ={
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def snake_case ( self : Optional[int] ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def snake_case ( self : int ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case ( self : Optional[int] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case ( self : str ):
"""simple docstring"""
self._test_save_load_local()
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 141 | 1 |
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for n < 3_317_044_064_679_887_385_961_981."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False

    return True


def test_miller_rabin() -> None:
    """Testing a nontrivial (ends in 1, 3, 7, 9) composite and a prime in each range."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
| 65 |
from manim import *
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : Optional[int] = Rectangle(height=0.5 , width=0.5 )
A_ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
A_ : Any = [mem.copy() for i in range(6 )]
A_ : Tuple = [mem.copy() for i in range(6 )]
A_ : str = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Union[str, Any] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Union[str, Any] = VGroup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Optional[Any] = Text('''CPU''' , font_size=24 )
A_ : Union[str, Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = [mem.copy() for i in range(1 )]
A_ : Any = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Dict = Text('''GPU''' , font_size=24 )
A_ : List[str] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
gpu.align_to(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
gpu.set_x(gpu.get_x() - 1 )
self.add(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = [mem.copy() for i in range(6 )]
A_ : List[str] = VGroup(*_SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0 )
A_ : Union[str, Any] = Text('''Model''' , font_size=24 )
A_ : List[Any] = Group(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).arrange(_SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=_SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.play(
Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , Create(_SCREAMING_SNAKE_CASE , run_time=1 ) , )
A_ : int = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
A_ : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A_ : Any = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_SCREAMING_SNAKE_CASE , run_time=2.5 ) , Write(_SCREAMING_SNAKE_CASE ) , Write(_SCREAMING_SNAKE_CASE ) )
self.add(_SCREAMING_SNAKE_CASE )
A_ : Dict = []
A_ : int = []
A_ : Optional[Any] = []
for i, rect in enumerate(_SCREAMING_SNAKE_CASE ):
A_ : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(_SCREAMING_SNAKE_CASE , opacity=0.7 )
cpu_target.move_to(_SCREAMING_SNAKE_CASE )
cpu_target.generate_target()
A_ : Union[str, Any] = 0.4_6 / 4
A_ : Any = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=_SCREAMING_SNAKE_CASE )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_SCREAMING_SNAKE_CASE , buff=0.0 )
cpu_targs.append(_SCREAMING_SNAKE_CASE )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_SCREAMING_SNAKE_CASE ) )
second_animations.append(MoveToTarget(_SCREAMING_SNAKE_CASE , run_time=1.5 ) )
self.play(*_SCREAMING_SNAKE_CASE )
self.play(*_SCREAMING_SNAKE_CASE )
self.wait()
| 65 | 1 |
'''simple docstring'''
def is_isogram(string: str) -> bool:
    """Returns True if no letter repeats in the (case-insensitive) string."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
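# Examples (illustrative): is_isogram("Dermatoglyphics") -> True, is_isogram("moose") -> False,
# and is_isogram("abc1") raises ValueError because of the digit.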
| 250 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class lowerCAmelCase ( A ):
def __init__( self : Optional[Any] , *__lowercase : str , **__lowercase : Union[str, Any] ):
"""simple docstring"""
super().__init__(*__lowercase , **__lowercase )
__lowercase ={}
def snake_case ( self : Union[str, Any] , __lowercase : List[Any] , *__lowercase : Optional[int] , **__lowercase : int ):
"""simple docstring"""
__lowercase =super().add_tokens(__lowercase , *__lowercase , **__lowercase )
if num_added_tokens == 0:
raise ValueError(
f'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
' `placeholder_token` that is not already in the tokenizer.' )
def snake_case ( self : int , __lowercase : List[Any] , *__lowercase : Union[str, Any] , __lowercase : Dict=1 , **__lowercase : Dict ):
"""simple docstring"""
__lowercase =[]
if num_vec_per_token == 1:
self.try_adding_tokens(__lowercase , *__lowercase , **__lowercase )
output.append(__lowercase )
else:
__lowercase =[]
for i in range(__lowercase ):
__lowercase =placeholder_token + f'''_{i}'''
self.try_adding_tokens(__lowercase , *__lowercase , **__lowercase )
output.append(__lowercase )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f'''The tokenizer already has placeholder token {token} that can get confused with'''
f''' {placeholder_token}keep placeholder tokens independent''' )
__lowercase =output
def snake_case ( self : Tuple , __lowercase : Optional[int] , __lowercase : Optional[int]=False , __lowercase : Optional[int]=1.0 ):
"""simple docstring"""
if isinstance(__lowercase , __lowercase ):
__lowercase =[]
for i in range(len(__lowercase ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__lowercase ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
__lowercase =self.token_map[placeholder_token]
__lowercase =tokens[: 1 + int(len(__lowercase ) * prop_tokens_to_load )]
if vector_shuffle:
__lowercase =copy.copy(__lowercase )
random.shuffle(__lowercase )
__lowercase =text.replace(__lowercase , ' '.join(__lowercase ) )
return text
def __call__( self : int , __lowercase : List[Any] , *__lowercase : Tuple , __lowercase : Optional[Any]=False , __lowercase : Dict=1.0 , **__lowercase : List[Any] ):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
__lowercase , vector_shuffle=__lowercase , prop_tokens_to_load=__lowercase ) , *__lowercase , **__lowercase , )
def snake_case ( self : Dict , __lowercase : List[str] , *__lowercase : Tuple , __lowercase : Dict=False , __lowercase : List[str]=1.0 , **__lowercase : Optional[int] ):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
__lowercase , vector_shuffle=__lowercase , prop_tokens_to_load=__lowercase ) , *__lowercase , **__lowercase , )
| 141 | 0 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Probability density of a normal distribution with mean mu and standard deviation sigma, evaluated at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
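# Spot check (illustrative): gaussian(0) is approximately 0.3989422804014327,
# i.e. 1 / sqrt(2 * pi), the peak of the standard normal density.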
| 272 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slides a size x size window over a square matrix with the given stride and keeps the maximum of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slides a size x size window over a square matrix with the given stride and keeps the truncated average of each window."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
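# Quick sanity check (illustrative, not from the original file):
#   maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], size=2, stride=2)
#   -> [[6., 8.], [14., 16.]]
#   avgpooling of the same input -> [[3., 5.], [11., 13.]]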
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 272 | 1 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
AcceleratorState._reset_state() | 243 |
'''simple docstring'''
import requests
giphy_api_key = "YOUR API KEY"
# An API key can be created at https://developers.giphy.com/


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Return a list of GIF URLs from the Giphy search API for the given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship'))) | 152 | 0 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]:
_lowerCAmelCase =fname.split(os.path.sep )[-1]
return re.search(R"""^(.*)_\d+\.jpg$""" , __UpperCamelCase ).groups()[0]
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Union[str, Any]:
_lowerCAmelCase =file_names
_lowerCAmelCase =image_transform
_lowerCAmelCase =label_to_id
def __len__( self ) -> Union[str, Any]:
return len(self.file_names )
def __getitem__( self , __UpperCAmelCase ) -> List[Any]:
_lowerCAmelCase =self.file_names[idx]
_lowerCAmelCase =PIL.Image.open(__UpperCAmelCase )
_lowerCAmelCase =raw_image.convert("""RGB""" )
if self.image_transform is not None:
_lowerCAmelCase =self.image_transform(__UpperCAmelCase )
_lowerCAmelCase =extract_label(__UpperCAmelCase )
if self.label_to_id is not None:
_lowerCAmelCase =self.label_to_id[label]
return {"image": image, "label": label}
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any:
# Initialize accelerator
if args.with_tracking:
_lowerCAmelCase =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
_lowerCAmelCase =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase =config["""lr"""]
_lowerCAmelCase =int(config["""num_epochs"""] )
_lowerCAmelCase =int(config["""seed"""] )
_lowerCAmelCase =int(config["""batch_size"""] )
_lowerCAmelCase =config["""image_size"""]
if not isinstance(__UpperCamelCase , (list, tuple) ):
_lowerCAmelCase =(image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , """isdigit""" ):
if args.checkpointing_steps == "epoch":
_lowerCAmelCase =args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_lowerCAmelCase =int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
_lowerCAmelCase =None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_lowerCAmelCase =os.path.split(__UpperCamelCase )[-1].split(""".""" )[0]
accelerator.init_trackers(__UpperCamelCase , __UpperCamelCase )
# Grab all the image filenames
_lowerCAmelCase =[os.path.join(args.data_dir , __UpperCamelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )]
# Build the label correspondences
_lowerCAmelCase =[extract_label(__UpperCamelCase ) for fname in file_names]
_lowerCAmelCase =list(set(__UpperCamelCase ) )
id_to_label.sort()
_lowerCAmelCase ={lbl: i for i, lbl in enumerate(__UpperCamelCase )}
# Set the seed before splitting the data.
np.random.seed(__UpperCamelCase )
torch.manual_seed(__UpperCamelCase )
torch.cuda.manual_seed_all(__UpperCamelCase )
# Split our filenames between train and validation
_lowerCAmelCase =np.random.permutation(len(__UpperCamelCase ) )
_lowerCAmelCase =int(0.8 * len(__UpperCamelCase ) )
_lowerCAmelCase =random_perm[:cut]
_lowerCAmelCase =random_perm[cut:]
# For training we use a simple RandomResizedCrop
_lowerCAmelCase =Compose([RandomResizedCrop(__UpperCamelCase , scale=(0.5, 1.0) ), ToTensor()] )
_lowerCAmelCase =PetsDataset(
[file_names[i] for i in train_split] , image_transform=__UpperCamelCase , label_to_id=__UpperCamelCase )
# For evaluation, we use a deterministic Resize
_lowerCAmelCase =Compose([Resize(__UpperCamelCase ), ToTensor()] )
_lowerCAmelCase =PetsDataset([file_names[i] for i in eval_split] , image_transform=__UpperCamelCase , label_to_id=__UpperCamelCase )
# Instantiate dataloaders.
_lowerCAmelCase =DataLoader(__UpperCamelCase , shuffle=__UpperCamelCase , batch_size=__UpperCamelCase , num_workers=4 )
_lowerCAmelCase =DataLoader(__UpperCamelCase , shuffle=__UpperCamelCase , batch_size=__UpperCamelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase =create_model("""resnet50d""" , pretrained=__UpperCamelCase , num_classes=len(__UpperCamelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCAmelCase =model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_lowerCAmelCase =False
for param in model.get_classifier().parameters():
_lowerCAmelCase =True
# We normalize the batches of images to be a bit faster.
_lowerCAmelCase =torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device )
_lowerCAmelCase =torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_lowerCAmelCase =torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_lowerCAmelCase =OneCycleLR(optimizer=__UpperCamelCase , max_lr=__UpperCamelCase , epochs=__UpperCamelCase , steps_per_epoch=len(__UpperCamelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =accelerator.prepare(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
_lowerCAmelCase =0
# We also need to keep track of the starting epoch so files are named properly
_lowerCAmelCase =0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
_lowerCAmelCase =os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_lowerCAmelCase =[f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_lowerCAmelCase =dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_lowerCAmelCase =os.path.splitext(__UpperCamelCase )[0]
if "epoch" in training_difference:
_lowerCAmelCase =int(training_difference.replace("""epoch_""" , """""" ) ) + 1
_lowerCAmelCase =None
else:
_lowerCAmelCase =int(training_difference.replace("""step_""" , """""" ) )
_lowerCAmelCase =resume_step // len(__UpperCamelCase )
resume_step -= starting_epoch * len(__UpperCamelCase )
# Now we train the model
for epoch in range(__UpperCamelCase , __UpperCamelCase ):
model.train()
if args.with_tracking:
_lowerCAmelCase =0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_lowerCAmelCase =accelerator.skip_first_batches(__UpperCamelCase , __UpperCamelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_lowerCAmelCase =train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowerCAmelCase ={k: v.to(accelerator.device ) for k, v in batch.items()}
_lowerCAmelCase =(batch["""image"""] - mean) / std
_lowerCAmelCase =model(__UpperCamelCase )
_lowerCAmelCase =torch.nn.functional.cross_entropy(__UpperCamelCase , batch["""label"""] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__UpperCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_lowerCAmelCase =F'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_lowerCAmelCase =os.path.join(args.output_dir , __UpperCamelCase )
accelerator.save_state(__UpperCamelCase )
model.eval()
_lowerCAmelCase =0
_lowerCAmelCase =0
for step, batch in enumerate(__UpperCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowerCAmelCase ={k: v.to(accelerator.device ) for k, v in batch.items()}
_lowerCAmelCase =(batch["""image"""] - mean) / std
with torch.no_grad():
_lowerCAmelCase =model(__UpperCamelCase )
_lowerCAmelCase =outputs.argmax(dim=-1 )
_lowerCAmelCase , _lowerCAmelCase =accelerator.gather_for_metrics((predictions, batch["""label"""]) )
_lowerCAmelCase =predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_lowerCAmelCase =accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
"""accuracy""": 100 * eval_metric,
"""train_loss""": total_loss.item() / len(__UpperCamelCase ),
"""epoch""": epoch,
} , step=__UpperCamelCase , )
if checkpointing_steps == "epoch":
_lowerCAmelCase =F'''epoch_{epoch}'''
if args.output_dir is not None:
_lowerCAmelCase =os.path.join(args.output_dir , __UpperCamelCase )
accelerator.save_state(__UpperCamelCase )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument("""--data_dir""" , required=__UpperCamelCase , help="""The data folder on disk.""" )
parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" )
parser.add_argument(
"""--mixed_precision""" , type=__UpperCamelCase , default=__UpperCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--checkpointing_steps""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , )
parser.add_argument(
"""--output_dir""" , type=__UpperCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=__UpperCamelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
    args = parser.parse_args()
    config = {"""lr""": 3E-2, """num_epochs""": 3, """seed""": 42, """batch_size""": 64, """image_size""": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
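# Added sketch (not part of the script above): the resume logic relies on checkpoint
# folders named "epoch_{i}" or "step_{i}".  The helper below mirrors that parsing in
# isolation; `parse_checkpoint_name` is an illustrative name, not used by the script.
import os


def parse_checkpoint_name(path):
    """Return (starting_epoch, resume_step) for an `epoch_{i}` or `step_{i}` folder."""
    name = os.path.splitext(os.path.basename(path))[0]
    if "epoch" in name:
        return int(name.replace("epoch_", "")) + 1, None
    return 0, int(name.replace("step_", ""))


# parse_checkpoint_name("checkpoints/epoch_3")   == (4, None)
# parse_checkpoint_name("checkpoints/step_1200") == (0, 1200)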
| 341 |
"""simple docstring"""
def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =len(__UpperCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def _lowerCamelCase(__UpperCamelCase ) -> List[Any]:
if len(__UpperCamelCase ) <= 1:
return arr, 0
_lowerCAmelCase =len(__UpperCamelCase ) // 2
_lowerCAmelCase =arr[0:mid]
_lowerCAmelCase =arr[mid:]
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase =inversion_p + inversions_q + cross_inversions
return c, num_inversions
def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any:
_lowerCAmelCase =[]
_lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0
while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(__UpperCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def _lowerCamelCase() -> str:
_lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , __UpperCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
# an empty list should also have zero inversions
_lowerCAmelCase =[]
_lowerCAmelCase =count_inversions_bf(__UpperCamelCase )
_lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __UpperCamelCase )
if __name__ == "__main__":
main()
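# Added cross-check (not part of the original module): the divide-and-conquer
# count must always agree with the brute-force count on random inputs.
def _cross_check(trials=100, size=50):
    import random

    for _ in range(trials):
        data = [random.randint(0, 100) for _ in range(size)]
        _, fast = count_inversions_recursive(data)
        assert fast == count_inversions_bf(data)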
| 341 | 1 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in qs match some window of the key tuple ks."""
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
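# Illustrative only: applying set_partitions to a toy parameter tree.  The key
# names below are made up for the example; real trees come from the Flax model
# parameters.
if __name__ == "__main__":
    toy_params = {
        "transformer": {
            "wte": {"embedding": 0},
            "h": {"0": {"attention": {"out_proj": {"kernel": 0, "bias": 0}}}},
        }
    }
    print(set_partitions(toy_params))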
| 240 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
a__ = {doc: key_lines}
a__ = {doc: sys_lines}
a__ = {}
a__ = 0
a__ = 0
a__ = 0
a__ = 0
a__ = 0
a__ = 0
a__ , a__ = reader.get_doc_mentions(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
a__ = reader.set_annotated_parse_trees(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase , __lowerCAmelCase )
a__ , a__ = reader.get_doc_mentions(__lowerCAmelCase , sys_doc_lines[doc] , __lowerCAmelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
a__ = reader.set_annotated_parse_trees(__lowerCAmelCase , key_doc_lines[doc] , __lowerCAmelCase , __lowerCAmelCase )
if remove_nested:
a__ , a__ = reader.remove_nested_coref_mentions(__lowerCAmelCase , __lowerCAmelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
a__ , a__ = reader.remove_nested_coref_mentions(__lowerCAmelCase , __lowerCAmelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
a__ = reader.get_mention_assignments(__lowerCAmelCase , __lowerCAmelCase )
a__ = reader.get_mention_assignments(__lowerCAmelCase , __lowerCAmelCase )
a__ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
'Number of resulting singleton clusters in the key '
F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
'files, respectively' )
return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
a__ = get_coref_infos(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
a__ = {}
a__ = 0
a__ = 0
for name, metric in metrics:
a__ , a__ , a__ = evaluator.evaluate_documents(__lowerCAmelCase , __lowerCAmelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
logger.info(
name.ljust(1_0 ) , F'Recall: {recall * 1_0_0:.2f}' , F' Precision: {precision * 1_0_0:.2f}' , F' F1: {fa * 1_0_0:.2f}' , )
if conll_subparts_num == 3:
a__ = (conll / 3) * 1_0_0
logger.info(F'CoNLL score: {conll:.2f}' )
output_scores.update({'conll_score': conll} )
return output_scores
def check_gold_parse_annotation(key_lines):
a__ = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
a__ = line.split()[5]
if not parse_col == "-":
a__ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Sequence(datasets.Value('string' ) ),
} ) ,codebase_urls=['https://github.com/ns-moosavi/coval'] ,reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] ,)
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
a__ = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
a__ = util.check_gold_parse_annotation(__snake_case )
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
a__ = evaluate(
key_lines=__snake_case ,sys_lines=__snake_case ,metrics=__snake_case ,NP_only=__snake_case ,remove_nested=__snake_case ,keep_singletons=__snake_case ,min_span=__snake_case ,)
return score
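# For reference (added sketch): the "conll_score" reported above is simply the
# mean of the MUC, B-cubed and CEAFe F1 values, expressed as a percentage.
def conll_average(muc_f1, bcub_f1, ceafe_f1):
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100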
| 240 | 1 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
a :Dict = "pt"
elif is_tf_available():
a :int = "tf"
else:
a :Optional[int] = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
        tokenizer = ByT5Tokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _a ( self ) -> str:
"""simple docstring"""
        return ByT5Tokenizer.from_pretrained("""google/byt5-small""" )
    def _a ( self , **_a ) -> ByT5Tokenizer:
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a )
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = []
for i in range(len(_a ) ):
try:
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode([i] , clean_up_tokenization_spaces=_a )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE__ : List[str] = list(filter(lambda _a : re.match(r"""^[ a-zA-Z]+$""" , t[1] ) , _a ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(filter(lambda _a : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_a ) , _a ) )
if max_length is not None and len(_a ) > max_length:
SCREAMING_SNAKE_CASE__ : Any = toks[:max_length]
if min_length is not None and len(_a ) < min_length and len(_a ) > 0:
while len(_a ) < min_length:
SCREAMING_SNAKE_CASE__ : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE__ : Tuple = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.decode(_a , clean_up_tokenization_spaces=_a )
if " " not in output_txt and len(_a ) > 1:
SCREAMING_SNAKE_CASE__ : str = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_a )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_a )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE__ : List[str] = """ """ + output_txt
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode(_a , add_special_tokens=_a )
return output_txt, output_ids
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE__ : Any = """Unicode €."""
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , _a )
# decoding
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.decode(_a )
self.assertEqual(_a , """Unicode €.</s>""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer("""e è é ê ë""" )
SCREAMING_SNAKE_CASE__ : Any = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , _a )
# decoding
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.decode(_a )
self.assertEqual(_a , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer(_a , padding=_a , return_tensors=_a )
self.assertIsInstance(_a , _a )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE__ : str = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE__ : str = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_a , _a )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE__ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_a , padding=_a , return_tensors=_a )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , _a )
self.assertIn("""attention_mask""" , _a )
self.assertNotIn("""decoder_input_ids""" , _a )
self.assertNotIn("""decoder_attention_mask""" , _a )
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE__ : List[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(
text_target=_a , max_length=32 , padding="""max_length""" , truncation=_a , return_tensors=_a )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE__ : List[str] = ["""A long paragraph for summarization. </s>"""]
SCREAMING_SNAKE_CASE__ : Tuple = ["""Summary of the text. </s>"""]
# fmt: off
SCREAMING_SNAKE_CASE__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
SCREAMING_SNAKE_CASE__ : int = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(_a , text_target=_a )
self.assertEqual(_a , batch["""input_ids"""][0] )
self.assertEqual(_a , batch["""labels"""][0] )
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE__ : Optional[int] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : str = """ He is very happy, UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE__ : Any = tokenizer.encode(_a , add_special_tokens=_a )
tokenizer.save_pretrained(_a )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.__class__.from_pretrained(_a )
SCREAMING_SNAKE_CASE__ : str = after_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
shutil.rmtree(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE__ : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Any = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.encode(_a , add_special_tokens=_a )
tokenizer.save_pretrained(_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.__class__.from_pretrained(_a )
SCREAMING_SNAKE_CASE__ : List[str] = after_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE__ : str = tokenizer.__class__.from_pretrained(_a , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_a )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_a )
with open(os.path.join(_a , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
SCREAMING_SNAKE_CASE__ : int = json.load(_a )
with open(os.path.join(_a , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
SCREAMING_SNAKE_CASE__ : int = json.load(_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [f'''<extra_id_{i}>''' for i in range(125 )]
SCREAMING_SNAKE_CASE__ : List[Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
SCREAMING_SNAKE_CASE__ : Any = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(_a , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_a , _a )
with open(os.path.join(_a , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_a , _a )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer_class.from_pretrained(
_a , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE__ : List[Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=_a )]
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer_class.from_pretrained(
_a , additional_special_tokens=_a , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_a )
SCREAMING_SNAKE_CASE__ : str = tokenizer_class.from_pretrained(_a )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def _a ( self ) -> Any:
"""simple docstring"""
pass
def _a ( self ) -> int:
"""simple docstring"""
pass
def _a ( self ) -> Tuple:
"""simple docstring"""
pass
def _a ( self ) -> List[str]:
"""simple docstring"""
pass
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_tokenizers(fast=_a , do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
SCREAMING_SNAKE_CASE__ : str = tokenizer.convert_tokens_to_string(_a )
self.assertIsInstance(_a , _a )
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE__ : List[Any] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(
_a , skip_special_tokens=_a )
for attr in attributes_list:
setattr(_a , attr + """_id""" , _a )
self.assertEqual(getattr(_a , _a ) , _a )
self.assertEqual(getattr(_a , attr + """_id""" ) , _a )
setattr(_a , attr + """_id""" , _a )
self.assertEqual(getattr(_a , _a ) , _a )
self.assertEqual(getattr(_a , attr + """_id""" ) , _a )
setattr(_a , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(_a , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(_a , """additional_special_tokens_ids""" ) , [] )
setattr(_a , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(_a , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(_a , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
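# Added illustration: the expected ids in the tests above follow ByT5's byte-level
# scheme, where each UTF-8 byte b maps to id b + 3 (ids 0-2 are reserved for
# pad/eos/unk) and </s> is id 1.  `byt5_byte_ids` is a name made up for this sketch.
def byt5_byte_ids(text):
    return [b + 3 for b in text.encode("utf-8")] + [1]  # trailing </s>


# byt5_byte_ids("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]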
| 56 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
return number | (1 << position)
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
return number & ~(1 << position)
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
return number ^ (1 << position)
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> bool:
return ((number >> position) & 1) == 1
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> int:
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
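# Example behaviour of the helpers above (added illustration):
#   set_bit(0b1101, 1)    -> 0b1111 (15)
#   clear_bit(0b1111, 2)  -> 0b1011 (11)
#   flip_bit(0b1101, 0)   -> 0b1100 (12)
#   is_bit_set(0b1010, 3) -> True
#   get_bit(0b1010, 1)    -> 1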
| 56 | 1 |
'''Maclaurin series approximations of sin and cos.'''
from math import factorial, pi


def maclaurin_sin(theta, accuracy=30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # reduce theta into [-2*pi, 2*pi] so the truncated series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta, accuracy=30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
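# Added sanity check (not in the original file): after range reduction the
# 30-term series should agree closely with math.sin / math.cos.
def _sanity_check():
    from math import cos, isclose, sin

    for x in (0.0, 1.0, -2.5, 10.0):
        assert isclose(maclaurin_sin(x), sin(x), abs_tol=1e-9)
        assert isclose(maclaurin_cos(x), cos(x), abs_tol=1e-9)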
| 80 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model and save it."""
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
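# Hypothetical invocation (file name, paths and task are placeholders):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-base-cased-pytorch \
#       --finetuning_task sts-b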
| 95 | 0 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class snake_case ( unittest.TestCase):
def a_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_A = ModelForTest()
_A = ModelHook()
add_hook_to_module(a__ , a__ )
self.assertEqual(test_model._hf_hook , a__ )
self.assertTrue(hasattr(a__ , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(a__ )
self.assertFalse(hasattr(a__ , "_hf_hook" ) )
self.assertFalse(hasattr(a__ , "_old_forward" ) )
def a_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
_A = ModelForTest()
_A = ModelHook()
add_hook_to_module(a__ , a__ )
add_hook_to_module(a__ , a__ , append=a__ )
self.assertEqual(isinstance(test_model._hf_hook , a__ ) , a__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(a__ , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(a__ )
self.assertFalse(hasattr(a__ , "_hf_hook" ) )
self.assertFalse(hasattr(a__ , "_old_forward" ) )
def a_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_A = ModelForTest()
_A = torch.randn(2 , 3 )
_A = test_model(x + 1 )
_A = test_model(x + 2 )
_A = PreForwardHook()
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
self.assertTrue(torch.allclose(a__ , a__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_A = PreForwardHook()
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
self.assertTrue(torch.allclose(a__ , a__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_A = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
assert torch.allclose(a__ , a__ , atol=1E-5 )
def a_ ( self : Any ) -> str:
'''simple docstring'''
_A = ModelForTest()
_A = torch.randn(2 , 3 )
_A = test_model(a__ )
_A = PostForwardHook()
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
self.assertTrue(torch.allclose(a__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_A = PostForwardHook()
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
self.assertTrue(torch.allclose(a__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_A = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
assert torch.allclose(a__ , output + 2 , atol=1E-5 )
def a_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_A = ModelForTest()
_A = torch.randn(2 , 3 )
_A = test_model(a__ )
_A = PostForwardHook()
add_hook_to_module(a__ , a__ )
_A = test_model(a__ )
self.assertTrue(torch.allclose(a__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_A = True
_A = test_model(a__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(a__ , AlignDevicesHook(io_same_device=a__ ) )
_A = torch.randn(2 , 3 ).to(0 )
_A = model(a__ )
self.assertEqual(output.device , torch.device(0 ) )
def a_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
_A = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**a__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**a__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**a__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_A = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device , a__ )
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
_A = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**a__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**a__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**a__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def a_ ( self : str ) -> List[Any]:
'''simple docstring'''
_A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
_A = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(a__ , execution_device=a__ , offload=a__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_A = torch.device(a__ )
self.assertEqual(model.batchnorm.running_mean.device , a__ )
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(a__ , execution_device=a__ , offload=a__ , offload_buffers=a__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
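        # The pattern exercised above, in brief (illustrative; the device string is
        # a placeholder, the real tests pick GPU 0 when available):
        #   attach_align_device_hook(model, execution_device="cpu", offload=True)
        #   output = model(torch.randn(2, 3))
        #   remove_hook_from_submodules(model)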
def a_ ( self : str ) -> Optional[int]:
'''simple docstring'''
_A = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
_A = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
a__ , execution_device=a__ , offload=a__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_A = torch.device(a__ )
self.assertEqual(model.batchnorm.running_mean.device , a__ )
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
a__ , execution_device=a__ , offload=a__ , weights_map=model.state_dict() , offload_buffers=a__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
_A = torch.randn(2 , 3 )
_A = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a__ )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
| 163 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
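# For example (added note), get_upernet_config("upernet-convnext-tiny") returns a
# UperNet config whose ConvNeXt backbone uses depths [3, 3, 9, 3], hidden sizes
# [96, 192, 384, 768], auxiliary_in_channels 384 and 150 ADE20k labels.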
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.{j}.gamma""", f"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((f"""backbone.downsample_layers.{i}.0.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.0.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
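# Two representative (original checkpoint key -> HF key) pairs produced above:
#   "backbone.downsample_layers.0.0.weight" -> "backbone.embeddings.patch_embeddings.weight"
#   "backbone.stages.0.0.gamma"             -> "backbone.encoder.stages.0.layers.0.layer_scale_parameter"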
def a__ ( __lowercase , __lowercase , __lowercase ) -> List[Any]:
_A = dct.pop(__lowercase )
_A = val
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
_A = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
_A = model_name_to_url[model_name]
_A = torch.hub.load_state_dict_from_url(__lowercase , map_location="cpu" )["state_dict"]
_A = get_upernet_config(__lowercase )
_A = UperNetForSemanticSegmentation(__lowercase )
model.eval()
# replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("bn" , "batch_norm" )
        state_dict[key] = val
# rename keys
_A = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
model.load_state_dict(__lowercase )
# verify on image
_A = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
_A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert("RGB" )
_A = SegformerImageProcessor()
_A = processor(__lowercase , return_tensors="pt" ).pixel_values
with torch.no_grad():
_A = model(__lowercase )
if model_name == "upernet-convnext-tiny":
_A = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
_A = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
_A = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
_A = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
_A = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowercase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowercase )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(__lowercase )
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f'''upernet-convnext-{size}''' for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a_ = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 163 | 1 |
import re
def __UpperCAmelCase ( __a : str ) -> bool:
"""simple docstring"""
_a : List[str] = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(_lowerCamelCase ,_lowerCamelCase ) )
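# Quick examples implied by the pattern above (prefix 0 / 94 / +94 / 0094, then 7x with
# x in {0,1,2,4,5,6,7,8}, an optional "-" or " ", then seven more digits):
#   is_sri_lankan_phone_number("+94773283048")  # True
#   is_sri_lankan_phone_number("0718773283")    # True
#   is_sri_lankan_phone_number("0112345678")    # False (no leading 7 after the prefix)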
if __name__ == "__main__":
a__ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
| 235 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def __init__( self :List[str] , a :Any , a :List[Any]=1_3 , a :Any=3_0 , a :Dict=2 , a :List[str]=3 , a :Union[str, Any]=True , a :Optional[int]=True , a :Tuple=3_2 , a :Any=5 , a :List[str]=4 , a :Optional[Any]=3_7 , a :List[Any]="gelu" , a :Any=0.1 , a :Any=0.1 , a :List[str]=1_0 , a :str=0.02 , ) -> Union[str, Any]:
__UpperCamelCase : int = parent
__UpperCamelCase : List[Any] = batch_size
__UpperCamelCase : Optional[Any] = image_size
__UpperCamelCase : int = patch_size
__UpperCamelCase : Optional[Any] = num_channels
__UpperCamelCase : Optional[int] = is_training
__UpperCamelCase : Union[str, Any] = use_labels
__UpperCamelCase : Tuple = hidden_size
__UpperCamelCase : List[Any] = num_hidden_layers
__UpperCamelCase : List[Any] = num_attention_heads
__UpperCamelCase : Dict = intermediate_size
__UpperCamelCase : Optional[int] = hidden_act
__UpperCamelCase : List[Any] = hidden_dropout_prob
__UpperCamelCase : str = attention_probs_dropout_prob
__UpperCamelCase : List[str] = type_sequence_label_size
__UpperCamelCase : List[str] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase : Union[str, Any] = (image_size // patch_size) ** 2
__UpperCamelCase : Optional[int] = num_patches + 1
def _lowerCamelCase ( self :Optional[int] ) -> Union[str, Any]:
__UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase : Tuple = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowerCamelCase ( self :Tuple , a :Optional[int] , a :Optional[int] ) -> Optional[int]:
__UpperCamelCase : Optional[Any] = FlaxViTModel(config=a )
__UpperCamelCase : int = model(a )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__UpperCamelCase : Any = (self.image_size, self.image_size)
__UpperCamelCase : Optional[int] = (self.patch_size, self.patch_size)
__UpperCamelCase : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowerCamelCase ( self :Dict , a :List[str] , a :Any ) -> str:
__UpperCamelCase : Dict = self.type_sequence_label_size
__UpperCamelCase : Dict = FlaxViTForImageClassification(config=a )
__UpperCamelCase : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCamelCase : Dict = 1
__UpperCamelCase : Optional[Any] = FlaxViTForImageClassification(a )
__UpperCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCamelCase : str = model(a )
def _lowerCamelCase ( self :str ) -> Optional[int]:
__UpperCamelCase : str = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) : List[Any] = config_and_inputs
__UpperCamelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( __lowercase , unittest.TestCase):
'''simple docstring'''
_A = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowerCamelCase ( self :Optional[int] ) -> None:
__UpperCamelCase : int = FlaxViTModelTester(self )
__UpperCamelCase : Any = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def _lowerCamelCase ( self :List[Any] ) -> str:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self :List[Any] ) -> List[str]:
__UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self :List[str] ) -> Union[str, Any]:
__UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
def _lowerCamelCase ( self :Tuple ) -> Any:
__UpperCamelCase , __UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase : List[Any] = model_class(a )
__UpperCamelCase : Optional[int] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase : Dict = [*signature.parameters.keys()]
__UpperCamelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def _lowerCamelCase ( self :str ) -> Any:
__UpperCamelCase , __UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase : List[Any] = self._prepare_for_class(a , a )
__UpperCamelCase : List[Any] = model_class(a )
@jax.jit
def model_jitted(a :List[str] , **a :List[Any] ):
return model(pixel_values=a , **a )
with self.subTest("JIT Enabled" ):
__UpperCamelCase : int = model_jitted(**a ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__UpperCamelCase : Optional[Any] = model_jitted(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) )
for jitted_output, output in zip(a , a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCamelCase ( self :Dict ) -> str:
for model_class_name in self.all_model_classes:
__UpperCamelCase : List[Any] = model_class_name.from_pretrained("google/vit-base-patch16-224" )
__UpperCamelCase : Optional[int] = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
            self.assertIsNotNone(a )
| 232 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _UpperCAmelCase( unittest.TestCase ):
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
_UpperCamelCase = inspect.getfile(accelerate.test_utils)
_UpperCamelCase = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''])
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
_UpperCamelCase = test_metrics
@require_cpu
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1)
@require_cpu
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
debug_launcher(self.test_metrics.main)
@require_single_gpu
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
print(F'''Found {torch.cuda.device_count()} devices.''')
_UpperCamelCase = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(UpperCAmelCase_ , env=os.environ.copy())
| 365 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'dandelin/vilt-b32-finetuned-vqa'
lowercase__ = (
'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
'image containing the information, as well as a `question` which should be the question in English. It '
'returns a text that is the answer to the question.'
)
lowercase__ = 'image_qa'
lowercase__ = AutoProcessor
lowercase__ = AutoModelForVisualQuestionAnswering
lowercase__ = ['image', 'text']
lowercase__ = ['text']
def __init__( self , *__a , **__a) -> int:
'''simple docstring'''
requires_backends(self , ['''vision'''])
super().__init__(*__a , **__a)
def UpperCAmelCase ( self , __a , __a) -> Dict:
'''simple docstring'''
return self.pre_processor(__a , __a , return_tensors='''pt''')
def UpperCAmelCase ( self , __a) -> Tuple:
'''simple docstring'''
with torch.no_grad():
return self.model(**__a).logits
def UpperCAmelCase ( self , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = outputs.argmax(-1).item()
return self.model.config.idalabel[idx]
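# Hedged usage sketch for the tool defined above (instantiation downloads the
# dandelin/vilt-b32-finetuned-vqa checkpoint; any RGB PIL image works):
#   tool = ImageQuestionAnsweringTool()   # assumed exported name for the class above
#   answer = tool(Image.open("photo.jpg"), "What is shown in the picture?")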
| 100 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase__ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 96 |
"""simple docstring"""
import math
def _snake_case ( lowercase__ ):
    return math.sqrt(lowercase__ ) * math.sqrt(lowercase__ ) == lowercase__
def _snake_case ( lowercase__ ):
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : List[Any] = n
while left <= right:
_lowerCamelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
_lowerCamelCase : str = mid - 1
else:
_lowerCamelCase : Optional[int] = mid + 1
return False
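# Both helpers above answer "is n a perfect square?"; expected behaviour, for example:
#   16 -> True, 25 -> True, 15 -> False, 26 -> False
# The sqrt-based version relies on floating point and can misreport very large inputs;
# the binary-search version stays exact for arbitrarily large integers.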
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 96 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
lowercase : str = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
lowercase : List[Any] = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
lowercase : str = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> tuple[str, float]:
_snake_case = len([g for position, g in enumerate(__A ) if g == main_target[position]] )
return (item, float(__A ))
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> tuple[str, str]:
_snake_case = random.randint(0 , len(__A ) - 1 )
_snake_case = parent_a[:random_slice] + parent_a[random_slice:]
_snake_case = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
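# The intent of the helper above is single-point crossover: cut both parents at one
# random position and swap the tails, e.g. "banana" / "cherry" cut at index 3 gives
# the children "banrry" and "cheana".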
def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> str:
_snake_case = list(__A )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_snake_case = random.choice(__A )
return "".join(__A )
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , ) -> list[str]:
_snake_case = []
# Generate more children proportionally to the fitness score.
_snake_case = int(parent_a[1] * 100 ) + 1
_snake_case = 10 if child_n >= 10 else child_n
for _ in range(__A ):
_snake_case = population_score[random.randint(0 , __A )][0]
_snake_case , _snake_case = crossover(parent_a[0] , __A )
# Append new string to the population list.
pop.append(mutate(__A , __A ) )
pop.append(mutate(__A , __A ) )
return pop
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A = True ) -> tuple[int, int, str]:
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
_snake_case = F'{N_POPULATION} must be bigger than {N_SELECTED}'
raise ValueError(__A )
# Verify that the target contains no genes besides the ones inside genes variable.
_snake_case = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_snake_case = F'{not_in_genes_list} is not in genes list, evolution cannot converge'
raise ValueError(__A )
# Generate random starting population.
_snake_case = []
for _ in range(__A ):
population.append(''.join([random.choice(__A ) for i in range(len(__A ) )] ) )
# Just some logs to know what the algorithms is doing.
_snake_case , _snake_case = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(__A )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
_snake_case = [evaluate(__A , __A ) for item in population]
# Check if there is a matching evolution.
_snake_case = sorted(__A , key=lambda __A : x[1] , reverse=__A )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F'\nGeneration: {generation}'
F'\nTotal Population:{total_population}'
F'\nBest score: {population_score[0][1]}'
F'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_snake_case = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(__A )
# Normalize population score to be between 0 and 1.
_snake_case = [
(item, score / len(__A )) for item, score in population_score
]
# This is selection
for i in range(__A ):
population.extend(select(population_score[int(__A )] , __A , __A ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
if len(__A ) > N_POPULATION:
break
if __name__ == "__main__":
lowercase : Tuple = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
lowercase : Tuple = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
lowercase : int = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 356 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class __UpperCAmelCase :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if dst_width < 0 or dst_height < 0:
raise ValueError('Destination width/height should be > 0' )
_snake_case = img
_snake_case = img.shape[1]
_snake_case = img.shape[0]
_snake_case = dst_width
_snake_case = dst_height
_snake_case = self.src_w / self.dst_w
_snake_case = self.src_h / self.dst_h
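        # These two ratios map a destination pixel index back to a source pixel index
        # (floored); get_x() and get_y() below implement exactly that lookup.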
_snake_case = _snake_case = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 2_55
)
def lowerCamelCase ( self ):
"""simple docstring"""
for i in range(self.dst_h ):
for j in range(self.dst_w ):
_snake_case = self.img[self.get_y(lowerCAmelCase_ )][self.get_x(lowerCAmelCase_ )]
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
return int(self.ratio_x * x )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
return int(self.ratio_y * y )
if __name__ == "__main__":
lowercase , lowercase : Optional[Any] = 800, 600
lowercase : Tuple = imread("image_data/lena.jpg", 1)
lowercase : Any = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
| 160 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def A (__A : List[str] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = image.size
UpperCAmelCase_ , UpperCAmelCase_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCAmelCase_ = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
UpperCAmelCase_ = np.array(__A ).astype(np.floataa ) / 255.0
UpperCAmelCase_ = image[None].transpose(0 , 3 , 1 , 2 )
UpperCAmelCase_ = torch.from_numpy(__A )
return 2.0 * image - 1.0
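# preprocess() above maps an RGB PIL image to a float tensor of shape (1, 3, H, W),
# with H and W rounded down to multiples of 32 and pixel values rescaled from
# [0, 255] to [-1, 1] before it is concatenated with the latents in the pipeline below.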
class __snake_case ( a ):
def __init__( self : int , _snake_case : VQModel , _snake_case : UNetaDModel , _snake_case : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=_snake_case , unet=_snake_case , scheduler=_snake_case)
@torch.no_grad()
def __call__( self : List[Any] , _snake_case : Union[torch.Tensor, PIL.Image.Image] = None , _snake_case : Optional[int] = 1 , _snake_case : Optional[int] = 100 , _snake_case : Optional[float] = 0.0 , _snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _snake_case : Optional[str] = "pil" , _snake_case : bool = True , ):
"""simple docstring"""
if isinstance(_snake_case , PIL.Image.Image):
UpperCAmelCase_ = 1
elif isinstance(_snake_case , torch.Tensor):
UpperCAmelCase_ = image.shape[0]
else:
raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_snake_case)}""")
if isinstance(_snake_case , PIL.Image.Image):
UpperCAmelCase_ = preprocess(_snake_case)
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
UpperCAmelCase_ = (batch_size, self.unet.config.in_channels // 2, height, width)
UpperCAmelCase_ = next(self.unet.parameters()).dtype
UpperCAmelCase_ = randn_tensor(_snake_case , generator=_snake_case , device=self.device , dtype=_snake_case)
UpperCAmelCase_ = image.to(device=self.device , dtype=_snake_case)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_snake_case , device=self.device)
UpperCAmelCase_ = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase_ = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
UpperCAmelCase_ = {}
if accepts_eta:
UpperCAmelCase_ = eta
for t in self.progress_bar(_snake_case):
# concat latents and low resolution image in the channel dimension.
UpperCAmelCase_ = torch.cat([latents, image] , dim=1)
UpperCAmelCase_ = self.scheduler.scale_model_input(_snake_case , _snake_case)
# predict the noise residual
UpperCAmelCase_ = self.unet(_snake_case , _snake_case).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ = self.scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
# decode the image latents with the VQVAE
UpperCAmelCase_ = self.vqvae.decode(_snake_case).sample
UpperCAmelCase_ = torch.clamp(_snake_case , -1.0 , 1.0)
UpperCAmelCase_ = image / 2 + 0.5
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
UpperCAmelCase_ = self.numpy_to_pil(_snake_case)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case)
| 51 |
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE = 1000000 ) ->int:
    limit = _SCREAMING_SNAKE_CASE + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f"{solution() = }")
| 290 | 0 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
SCREAMING_SNAKE_CASE_: Optional[Any] ={'UserAgent': UserAgent().random}
def lowerCAmelCase_ ( snake_case_ : int ) -> dict:
'''simple docstring'''
UpperCAmelCase_ = script.contents[0]
UpperCAmelCase_ = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __A :
def __init__(self : Any , __a : Union[str, Any] ):
UpperCAmelCase_ = f"""https://www.instagram.com/{username}/"""
UpperCAmelCase_ = self.get_json()
def _lowercase (self : Any ):
UpperCAmelCase_ = requests.get(self.url , headers=__a ).text
UpperCAmelCase_ = BeautifulSoup(__a , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__(self : int ):
return f"""{self.__class__.__name__}('{self.username}')"""
def __str__(self : Union[str, Any] ):
return f"""{self.fullname} ({self.username}) is {self.biography}"""
@property
def _lowercase (self : Dict ):
return self.user_data["username"]
@property
def _lowercase (self : Dict ):
return self.user_data["full_name"]
@property
def _lowercase (self : str ):
return self.user_data["biography"]
@property
def _lowercase (self : Optional[Any] ):
return self.user_data["business_email"]
@property
def _lowercase (self : Dict ):
return self.user_data["external_url"]
@property
def _lowercase (self : Union[str, Any] ):
return self.user_data["edge_followed_by"]["count"]
@property
def _lowercase (self : Optional[int] ):
return self.user_data["edge_follow"]["count"]
@property
def _lowercase (self : Optional[Any] ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _lowercase (self : int ):
return self.user_data["profile_pic_url_hd"]
@property
def _lowercase (self : List[Any] ):
return self.user_data["is_verified"]
@property
def _lowercase (self : Optional[Any] ):
return self.user_data["is_private"]
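# Hedged usage sketch (requires network access, and the page markup this parser relies
# on changes frequently, so treat every attribute as best-effort):
#   user = InstagramUser("github")
#   print(user.fullname, user.number_of_followers)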
def lowerCAmelCase_ ( snake_case_ : str = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCAmelCase_ = InstagramUser(snake_case_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , snake_case_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE_: int =InstagramUser('github')
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| 106 |
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
SCREAMING_SNAKE_CASE_: Dict =True
except ImportError:
SCREAMING_SNAKE_CASE_: str =False
try:
from torch.hub import _get_torch_home
SCREAMING_SNAKE_CASE_: Optional[Any] =_get_torch_home()
except ImportError:
SCREAMING_SNAKE_CASE_: Union[str, Any] =os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
SCREAMING_SNAKE_CASE_: int =os.path.join(torch_cache_home, 'transformers')
SCREAMING_SNAKE_CASE_: Tuple ='https://cdn.huggingface.co'
SCREAMING_SNAKE_CASE_: str ='https://s3.amazonaws.com/models.huggingface.co/bert'
SCREAMING_SNAKE_CASE_: str ='/'.join(str(Path(__file__).resolve()).split('/')[:-1])
SCREAMING_SNAKE_CASE_: Optional[Any] =os.path.join(PATH, 'config.yaml')
SCREAMING_SNAKE_CASE_: Optional[Any] =os.path.join(PATH, 'attributes.txt')
SCREAMING_SNAKE_CASE_: Any =os.path.join(PATH, 'objects.txt')
SCREAMING_SNAKE_CASE_: Optional[int] =os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
SCREAMING_SNAKE_CASE_: int =os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
SCREAMING_SNAKE_CASE_: List[str] =os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
SCREAMING_SNAKE_CASE_: str ='pytorch_model.bin'
SCREAMING_SNAKE_CASE_: Dict ='config.yaml'
def lowerCAmelCase_ ( snake_case_ : Optional[int]=OBJECTS , snake_case_ : Optional[Any]=ATTRIBUTES ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_classes.append(object.split("," )[0].lower().strip() )
UpperCAmelCase_ = []
with open(snake_case_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split("," )[0].lower().strip() )
return vg_classes, vg_attrs
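# Hedged usage sketch (the loader above is called here under the assumed name
# load_labels; both input files hold one comma-separated entry per line):
#   objids, attrids = load_labels(OBJECTS, ATTRIBUTES)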
def lowerCAmelCase_ ( snake_case_ : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = OrderedDict()
with open(snake_case_ , "rb" ) as f:
UpperCAmelCase_ = pkl.load(snake_case_ )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
UpperCAmelCase_ = ckp.pop(snake_case_ )
if isinstance(snake_case_ , np.ndarray ):
UpperCAmelCase_ = torch.tensor(snake_case_ )
else:
assert isinstance(snake_case_ , torch.tensor ), type(snake_case_ )
UpperCAmelCase_ = v
return r
class __A :
a__ : Optional[Any] = {}
def __init__(self : Union[str, Any] , __a : dict , __a : str = "root" , __a : str=0 ):
UpperCAmelCase_ = name
UpperCAmelCase_ = level
UpperCAmelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
UpperCAmelCase_ = copy.deepcopy(__a )
UpperCAmelCase_ = copy.deepcopy(__a )
if isinstance(__a , __a ):
UpperCAmelCase_ = Config(__a , name=__a , level=level + 1 )
UpperCAmelCase_ = v
setattr(self , __a , __a )
UpperCAmelCase_ = d
def __repr__(self : List[Any] ):
return str(list((self._pointer.keys()) ) )
def __setattr__(self : int , __a : str , __a : Dict ):
UpperCAmelCase_ = val
UpperCAmelCase_ = val
UpperCAmelCase_ = key.split("." )
UpperCAmelCase_ = len(__a ) - 1
UpperCAmelCase_ = self._pointer
if len(__a ) > 1:
for i, l in enumerate(__a ):
if hasattr(self , __a ) and isinstance(getattr(self , __a ) , __a ):
setattr(getattr(self , __a ) , ".".join(levels[i:] ) , __a )
if l == last_level:
UpperCAmelCase_ = val
else:
UpperCAmelCase_ = pointer[l]
def _lowercase (self : Optional[Any] ):
return self._pointer
def _lowercase (self : int , __a : Union[str, Any] , __a : str ):
with open(f"""{file_name}""" , "w" ) as stream:
dump(__a , __a )
def _lowercase (self : Any , __a : Optional[Any] , __a : List[str] ):
with open(f"""{file_name}""" , "w" ) as stream:
json.dump(__a , __a )
@staticmethod
def _lowercase (__a : str ):
with open(__a ) as stream:
UpperCAmelCase_ = load(__a , Loader=__a )
return data
def __str__(self : Dict ):
UpperCAmelCase_ = " "
if self._name != "root":
UpperCAmelCase_ = f"""{t * (self._level-1)}{self._name}:\n"""
else:
UpperCAmelCase_ = ""
UpperCAmelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__a , __a ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(__a ).__name__})\n"""
UpperCAmelCase_ = level
return r[:-1]
@classmethod
def _lowercase (cls : Tuple , __a : str , **__a : Dict ):
UpperCAmelCase_ , UpperCAmelCase_ = cls.get_config_dict(__a , **__a )
return cls(__a )
@classmethod
def _lowercase (cls : Any , __a : str , **__a : Dict ):
UpperCAmelCase_ = kwargs.pop("cache_dir" , __a )
UpperCAmelCase_ = kwargs.pop("force_download" , __a )
UpperCAmelCase_ = kwargs.pop("resume_download" , __a )
UpperCAmelCase_ = kwargs.pop("proxies" , __a )
UpperCAmelCase_ = kwargs.pop("local_files_only" , __a )
if os.path.isdir(__a ):
UpperCAmelCase_ = os.path.join(__a , __a )
elif os.path.isfile(__a ) or is_remote_url(__a ):
UpperCAmelCase_ = pretrained_model_name_or_path
else:
UpperCAmelCase_ = hf_bucket_url(__a , filename=__a , use_cdn=__a )
try:
# Load from URL or cache if already cached
UpperCAmelCase_ = cached_path(
__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
UpperCAmelCase_ = Config.load_yaml(__a )
except EnvironmentError:
UpperCAmelCase_ = "Can't load config for"
raise EnvironmentError(__a )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(__a ), kwargs
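# Hedged usage sketch for the Config class above (the checkpoint id is illustrative;
# any repo that ships a config.yaml in this format works the same way):
#   cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
#   print(cfg)    # nested dictionaries print as attribute-accessible sub-configs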
def lowerCAmelCase_ ( snake_case_ : str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = torch.load("dump.pt" , map_location=in_tensor.device )
UpperCAmelCase_ = in_tensor.numpy()
UpperCAmelCase_ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %"""
" element-wise mismatch"
)
raise Exception("tensors are all good" )
# Hugging face functions below
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = urlparse(snake_case_ )
return parsed.scheme in ("http", "https")
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[int]=True ) -> str:
'''simple docstring'''
UpperCAmelCase_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
UpperCAmelCase_ = "/" not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int]=None , snake_case_ : List[Any]=0 , snake_case_ : int=None , ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(snake_case_ , snake_case_ ):
ua += "; " + "; ".join("{}/{}".format(snake_case_ , snake_case_ ) for k, v in user_agent.items() )
elif isinstance(snake_case_ , snake_case_ ):
ua += "; " + user_agent
UpperCAmelCase_ = {"user-agent": ua}
if resume_size > 0:
UpperCAmelCase_ = "bytes=%d-" % (resume_size,)
UpperCAmelCase_ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ )
if response.status_code == 4_16: # Range not satisfiable
return
UpperCAmelCase_ = response.headers.get("Content-Length" )
UpperCAmelCase_ = resume_size + int(snake_case_ ) if content_length is not None else None
UpperCAmelCase_ = tqdm(
unit="B" , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc="Downloading" , )
for chunk in response.iter_content(chunk_size=10_24 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(snake_case_ ) )
temp_file.write(snake_case_ )
progress.close()
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : str=None , snake_case_ : List[str]=False , snake_case_ : List[str]=None , snake_case_ : int=10 , snake_case_ : Any=False , snake_case_ : int=None , snake_case_ : str=False , ) -> str:
'''simple docstring'''
if cache_dir is None:
UpperCAmelCase_ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
UpperCAmelCase_ = str(snake_case_ )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
UpperCAmelCase_ = None
if not local_files_only:
try:
UpperCAmelCase_ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ )
if response.status_code == 2_00:
UpperCAmelCase_ = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
UpperCAmelCase_ = url_to_filename(snake_case_ , snake_case_ )
# get cache path to put the file
UpperCAmelCase_ = os.path.join(snake_case_ , snake_case_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(snake_case_ ):
return cache_path
else:
UpperCAmelCase_ = [
file
for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(snake_case_ ) > 0:
return os.path.join(snake_case_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(snake_case_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
UpperCAmelCase_ = cache_path + ".lock"
with FileLock(snake_case_ ):
# If the download just completed while the lock was activated.
if os.path.exists(snake_case_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
UpperCAmelCase_ = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(snake_case_ , "a+b" ) as f:
yield f
UpperCAmelCase_ = _resumable_file_manager
if os.path.exists(snake_case_ ):
UpperCAmelCase_ = os.stat(snake_case_ ).st_size
else:
UpperCAmelCase_ = 0
else:
UpperCAmelCase_ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ )
UpperCAmelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" , snake_case_ , temp_file.name , )
http_get(
snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , )
os.replace(temp_file.name , snake_case_ )
UpperCAmelCase_ = {"url": url, "etag": etag}
UpperCAmelCase_ = cache_path + ".json"
with open(snake_case_ , "w" ) as meta_file:
json.dump(snake_case_ , snake_case_ )
return cache_path
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any=None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = url.encode("utf-8" )
UpperCAmelCase_ = shaaaa(snake_case_ )
UpperCAmelCase_ = url_hash.hexdigest()
if etag:
UpperCAmelCase_ = etag.encode("utf-8" )
UpperCAmelCase_ = shaaaa(snake_case_ )
filename += "." + etag_hash.hexdigest()
if url.endswith(".h5" ):
filename += ".h5"
return filename
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Tuple=None , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : List[Any]=False , snake_case_ : Any=None , snake_case_ : Any=False , snake_case_ : List[str]=False , snake_case_ : str=False , ) -> Union[str, Any]:
'''simple docstring'''
if cache_dir is None:
UpperCAmelCase_ = TRANSFORMERS_CACHE
if isinstance(snake_case_ , snake_case_ ):
UpperCAmelCase_ = str(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
UpperCAmelCase_ = str(snake_case_ )
if is_remote_url(snake_case_ ):
# URL, so get it from the cache (downloading if necessary)
UpperCAmelCase_ = get_from_cache(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , )
elif os.path.exists(snake_case_ ):
# File, and it exists.
UpperCAmelCase_ = url_or_filename
elif urlparse(snake_case_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(snake_case_ ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(snake_case_ ) )
if extract_compressed_file:
if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
UpperCAmelCase_ , UpperCAmelCase_ = os.path.split(snake_case_ )
UpperCAmelCase_ = output_file.replace("." , "-" ) + "-extracted"
UpperCAmelCase_ = os.path.join(snake_case_ , snake_case_ )
if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
UpperCAmelCase_ = output_path + ".lock"
with FileLock(snake_case_ ):
shutil.rmtree(snake_case_ , ignore_errors=snake_case_ )
os.makedirs(snake_case_ )
if is_zipfile(snake_case_ ):
with ZipFile(snake_case_ , "r" ) as zip_file:
zip_file.extractall(snake_case_ )
zip_file.close()
elif tarfile.is_tarfile(snake_case_ ):
UpperCAmelCase_ = tarfile.open(snake_case_ )
tar_file.extractall(snake_case_ )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(snake_case_ ) )
return output_path_extracted
return output_path
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Optional[int]="," ) -> int:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
with open(snake_case_ ) as f:
UpperCAmelCase_ = eval(f.read() )
else:
UpperCAmelCase_ = requests.get(snake_case_ )
try:
UpperCAmelCase_ = requests.json()
except Exception:
UpperCAmelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
UpperCAmelCase_ = eval(snake_case_ )
except Exception:
UpperCAmelCase_ = data.split("\n" )
req.close()
return data
def lowerCAmelCase_ ( snake_case_ : List[str] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = requests.get(snake_case_ )
UpperCAmelCase_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(snake_case_ )
with open(snake_case_ , "rb" ) as stream:
UpperCAmelCase_ = pkl.load(snake_case_ )
UpperCAmelCase_ = weights.pop("model" )
UpperCAmelCase_ = {}
for k, v in model.items():
UpperCAmelCase_ = torch.from_numpy(snake_case_ )
if "running_var" in k:
UpperCAmelCase_ = torch.tensor([0] )
UpperCAmelCase_ = k.replace("running_var" , "num_batches_tracked" )
UpperCAmelCase_ = zero
return new
def lowerCAmelCase_ ( ) -> int:
'''simple docstring'''
print(f"""{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb""" )
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Any="RGB" ) -> Dict:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ):
UpperCAmelCase_ = cva.imread(snake_case_ )
else:
UpperCAmelCase_ = get_image_from_url(snake_case_ )
assert img is not None, f"""could not connect to: {im}"""
UpperCAmelCase_ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
UpperCAmelCase_ = img[:, :, ::-1]
return img
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Union[str, Any]=1 ) -> str:
'''simple docstring'''
return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ ))
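# Intended behaviour of the generator above: yield the input sequence in fixed-size
# slices, e.g. chunking [1, 2, 3, 4, 5] with a batch of 2 yields [1, 2], [3, 4], [5].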
| 106 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str] =logging.get_logger(__name__)
A__ : List[Any] ={
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class UpperCAmelCase ( snake_case_ ):
_lowercase: int = '''markuplm'''
def __init__( self : Dict , __snake_case : Union[str, Any]=3_05_22 , __snake_case : Tuple=7_68 , __snake_case : Any=12 , __snake_case : List[str]=12 , __snake_case : str=30_72 , __snake_case : List[Any]="gelu" , __snake_case : List[str]=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=5_12 , __snake_case : List[str]=2 , __snake_case : int=0.02 , __snake_case : List[str]=1E-1_2 , __snake_case : Optional[int]=0 , __snake_case : str=0 , __snake_case : Tuple=2 , __snake_case : Tuple=2_56 , __snake_case : int=10_24 , __snake_case : Union[str, Any]=2_16 , __snake_case : List[str]=10_01 , __snake_case : Any=32 , __snake_case : Tuple=50 , __snake_case : str="absolute" , __snake_case : Tuple=True , __snake_case : List[Any]=None , **__snake_case : Union[str, Any] , ) -> List[Any]:
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case , )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = position_embedding_type
_lowerCAmelCase = use_cache
_lowerCAmelCase = classifier_dropout
# additional properties
_lowerCAmelCase = max_depth
_lowerCAmelCase = max_xpath_tag_unit_embeddings
_lowerCAmelCase = max_xpath_subs_unit_embeddings
_lowerCAmelCase = tag_pad_id
_lowerCAmelCase = subs_pad_id
_lowerCAmelCase = xpath_unit_hidden_size
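# Minimal, hedged construction example for the config class above (known upstream as
# MarkupLMConfig); the xpath-related defaults come straight from the signature:
#   config = MarkupLMConfig()        # assumes the usual exported name
#   config.max_depth                 # 50
#   config.xpath_unit_hidden_size    # 32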
| 70 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class UpperCAmelCase :
_lowercase: List[str]
_lowercase: Optional[str] = None
# Automatically constructed
_lowercase: ClassVar[str] = "dict"
_lowercase: ClassVar[Any] = None
_lowercase: str = field(default='''Translation''' , init=snake_case_ , repr=snake_case_ )
def __call__( self : Optional[int] ) -> Optional[int]:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase__ ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class UpperCAmelCase :
_lowercase: Optional[List] = None
_lowercase: Optional[int] = None
_lowercase: Optional[str] = None
# Automatically constructed
_lowercase: ClassVar[str] = "dict"
_lowercase: ClassVar[Any] = None
_lowercase: str = field(default='''TranslationVariableLanguages''' , init=snake_case_ , repr=snake_case_ )
def lowercase__ ( self : Any ) -> Optional[Any]:
_lowerCAmelCase = sorted(set(self.languages ) ) if self.languages else None
_lowerCAmelCase = len(self.languages ) if self.languages else None
def __call__( self : List[str] ) -> Optional[Any]:
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def lowercase__ ( self : Optional[Any] , __snake_case : Tuple ) -> Any:
_lowerCAmelCase = set(self.languages )
if self.languages and set(__snake_case ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
_lowerCAmelCase = []
for lang, text in translation_dict.items():
if isinstance(__snake_case , __snake_case ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
_lowerCAmelCase , _lowerCAmelCase = zip(*sorted(__snake_case ) )
return {"language": languages, "translation": translations}
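    # Example of the flattening performed above (pairs are sorted by language, then text):
    #   {"en": "the cat", "fr": ["le chat", "la chatte"]}
    #     -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "la chatte", "le chat")}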
def lowercase__ ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
| 70 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def lowerCAmelCase__ ( ):
'''simple docstring'''
print('''Making key files...''')
make_key_files('''rsa''' ,1024)
print('''Key files generation successful.''')
def lowerCAmelCase__ ( lowerCamelCase_ : int):
'''simple docstring'''
print('''Generating prime p...''')
lowerCAmelCase__ : Optional[int] = rabinMiller.generate_large_prime(lowerCamelCase_)
print('''Generating prime q...''')
lowerCAmelCase__ : List[str] = rabinMiller.generate_large_prime(lowerCamelCase_)
lowerCAmelCase__ : str = p * q
print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''')
while True:
lowerCAmelCase__ : str = random.randrange(2 ** (key_size - 1) ,2 ** (key_size))
if cryptoMath.gcd(lowerCamelCase_ ,(p - 1) * (q - 1)) == 1:
break
print('''Calculating d that is mod inverse of e...''')
lowerCAmelCase__ : Any = cryptoMath.find_mod_inverse(lowerCamelCase_ ,(p - 1) * (q - 1))
lowerCAmelCase__ : Union[str, Any] = (n, e)
lowerCAmelCase__ : List[Any] = (n, d)
return (public_key, private_key)
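# What generate_key() above returns: two (modulus, exponent) pairs sharing n = p * q,
#   public_key, private_key = generate_key(1024)
#   public_key  == (n, e)   # e chosen coprime to (p - 1) * (q - 1)
#   private_key == (n, d)   # d is the modular inverse of e modulo (p - 1) * (q - 1)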
def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : int):
'''simple docstring'''
if os.path.exists(f"""{name}_pubkey.txt""") or os.path.exists(f"""{name}_privkey.txt"""):
print('''\nWARNING:''')
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'''Use a different name or delete these files and re-run this program.''')
sys.exit()
lowerCAmelCase__ : Tuple = generate_key(lowerCamelCase_)
print(f"""\nWriting public key to file {name}_pubkey.txt...""")
with open(f"""{name}_pubkey.txt""" ,'''w''') as out_file:
out_file.write(f"""{key_size},{public_key[0]},{public_key[1]}""")
print(f"""Writing private key to file {name}_privkey.txt...""")
with open(f"""{name}_privkey.txt""" ,'''w''') as out_file:
out_file.write(f"""{key_size},{private_key[0]},{private_key[1]}""")
if __name__ == "__main__":
main()
| 364 |
__snake_case : Any ='\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__snake_case : Tuple =[{'type': 'code', 'content': INSTALL_CONTENT}]
__snake_case : Tuple ={
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 94 | 0 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
lowercase__ = logging.get_logger(__name__)
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = set()
UpperCAmelCase : Optional[int] = []
def parse_line(UpperCAmelCase_ ):
for line in fp:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[int] = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(UpperCAmelCase_ ) > 0:
UpperCAmelCase : str = '\n'.join(UpperCAmelCase_ )
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets ):
selected_warnings.add(UpperCAmelCase_ )
buffer.clear()
continue
else:
UpperCAmelCase : Union[str, Any] = line.strip()
buffer.append(UpperCAmelCase_ )
if from_gh:
for filename in os.listdir(UpperCAmelCase_ ):
UpperCAmelCase : str = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
if not os.path.isdir(UpperCAmelCase_ ):
# read the file
if filename != "warnings.txt":
continue
with open(UpperCAmelCase_ ) as fp:
parse_line(UpperCAmelCase_ )
else:
try:
with zipfile.ZipFile(UpperCAmelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCAmelCase_ ):
# read the file
if filename != "warnings.txt":
continue
with z.open(UpperCAmelCase_ ) as fp:
parse_line(UpperCAmelCase_ )
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[int] = set()
UpperCAmelCase : int = [os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) for p in os.listdir(UpperCAmelCase_ ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(UpperCAmelCase_ , UpperCAmelCase_ ) )
return selected_warnings
if __name__ == "__main__":
def UpperCamelCase( UpperCAmelCase_ ):
return values.split(',' )
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
lowercase__ = parser.parse_args()
lowercase__ = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
lowercase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
lowercase__ = extract_warnings(args.output_dir, args.targets)
lowercase__ = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 151 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = "▁"
lowercase__ = {"vocab_file": "spiece.model"}
lowercase__ = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
lowercase__ = {
"google/reformer-crime-and-punishment": 524288,
}
class A_ ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self : str , lowercase_ : Dict , lowercase_ : Tuple="</s>" , lowercase_ : Dict="<unk>" , lowercase_ : Tuple=[] , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : List[str] , ) -> None:
UpperCAmelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ , unk_token=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
UpperCAmelCase : List[Any] = vocab_file
UpperCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase_ ( self : List[str] ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) -> str:
UpperCAmelCase : Tuple = self.__dict__.copy()
UpperCAmelCase : Union[str, Any] = None
return state
def __setstate__( self : Optional[Any] , lowercase_ : Any ) -> List[str]:
UpperCAmelCase : Dict = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase : Dict = {}
UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : str ) -> List[str]:
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def UpperCAmelCase_ ( self : int , lowercase_ : Tuple ) -> Optional[int]:
return self.sp_model.piece_to_id(lowercase_ )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Optional[int] ) -> List[str]:
if index < self.sp_model.get_piece_size():
UpperCAmelCase : Tuple = self.sp_model.IdToPiece(lowercase_ )
return token
    def UpperCAmelCase_ ( self : List[str] , tokens : Optional[int] ) -> Optional[int]:
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase : int = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , 'wb' ) as fi:
UpperCAmelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
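# Hedged usage sketch (added; not from the original file): assuming this class is the
# Reformer SentencePiece tokenizer, typical use would be
#   tok = A_.from_pretrained("google/reformer-crime-and-punishment")
#   tok.tokenize("Hello world")   # -> list of SentencePiece pieces (prefixed with "▁")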
| 151 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __magic_name__ ( OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Tuple=0 ):
lowercase_ : Any = floats_tensor((1, 3, 128, 128) , rng=random.Random(snake_case_ ) )
lowercase_ : List[str] = np.random.RandomState(snake_case_ )
lowercase_ : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=snake_case_ )
lowercase_ : Tuple = self.get_dummy_inputs()
lowercase_ : Union[str, Any] = pipe(**snake_case_ ).images
lowercase_ : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
lowercase_ : int = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowercase_ : int = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
lowercase_ : Dict = self.get_dummy_inputs()
lowercase_ : int = pipe(**snake_case_ ).images
lowercase_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase_ : Tuple = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowercase_ : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
# warmup pass to apply optimizations
lowercase_ : List[Any] = pipe(**self.get_dummy_inputs() )
lowercase_ : List[str] = self.get_dummy_inputs()
lowercase_ : Optional[int] = pipe(**snake_case_ ).images
lowercase_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase_ : Any = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowercase_ : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
lowercase_ : Union[str, Any] = self.get_dummy_inputs()
lowercase_ : List[Any] = pipe(**snake_case_ ).images
lowercase_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase_ : Optional[Any] = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowercase_ : List[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
lowercase_ : Tuple = self.get_dummy_inputs()
lowercase_ : Tuple = pipe(**snake_case_ ).images
lowercase_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase_ : int = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowercase_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case_ )
lowercase_ : List[str] = self.get_dummy_inputs()
lowercase_ : List[str] = pipe(**snake_case_ ).images
lowercase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
lowercase_ : List[str] = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Tuple = ort.SessionOptions()
lowercase_ : Optional[Any] = False
return options
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
lowercase_ : str = init_image.resize((768, 512) )
# using the PNDM scheduler by default
lowercase_ : Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
lowercase_ : Dict = """A fantasy landscape, trending on artstation"""
lowercase_ : str = np.random.RandomState(0 )
lowercase_ : Union[str, Any] = pipe(
prompt=snake_case_ , image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type="""np""" , )
lowercase_ : str = output.images
lowercase_ : Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
lowercase_ : Optional[Any] = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
lowercase_ : List[Any] = init_image.resize((768, 512) )
lowercase_ : Tuple = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
lowercase_ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
lowercase_ : Union[str, Any] = """A fantasy landscape, trending on artstation"""
lowercase_ : Optional[int] = np.random.RandomState(0 )
lowercase_ : Optional[int] = pipe(
prompt=snake_case_ , image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case_ , output_type="""np""" , )
lowercase_ : Any = output.images
lowercase_ : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
lowercase_ : Tuple = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 370 |
'''simple docstring'''
def solution( limit : int = 1000000 ) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
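# Added explanatory note: writing the three terms of the arithmetic progression as
# a + d, a, a - d, the quantity x**2 - y**2 - z**2 simplifies to a * (4*d - a).  So every
# representable n factors as first_term * k with k = 4*d - first_term; the nested loops
# count, for each n below the limit, how many first_term values yield an integer d in the
# allowed range, and the answer is the number of n with exactly ten such solutions.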
if __name__ == "__main__":
print(f"""{solution() = }""")
| 21 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
class lowerCamelCase_ (BaseImageProcessor ):
'''simple docstring'''
__UpperCamelCase: int = ["pixel_values"]
def __init__( self : int , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 255 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Any , ):
super().__init__(**A )
_UpperCAmelCase : Dict = size if size is not None else {"shortest_edge": 256}
_UpperCAmelCase : Optional[int] = get_size_dict(A , default_to_square=A )
_UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
_UpperCAmelCase : List[str] = get_size_dict(A )
_UpperCAmelCase : str = do_resize
_UpperCAmelCase : Union[str, Any] = size
_UpperCAmelCase : Any = resample
_UpperCAmelCase : int = do_center_crop
_UpperCAmelCase : Union[str, Any] = crop_size
_UpperCAmelCase : int = do_rescale
_UpperCAmelCase : List[Any] = rescale_factor
_UpperCAmelCase : List[Any] = do_normalize
_UpperCAmelCase : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _A ( self : Optional[int] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : Tuple , ):
_UpperCAmelCase : List[Any] = get_size_dict(A , default_to_square=A )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_UpperCAmelCase : int = get_resize_output_image_size(A , size=size["shortest_edge"] , default_to_square=A )
return resize(A , size=A , resample=A , data_format=A , **A )
def _A ( self : Optional[int] , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : str , ):
_UpperCAmelCase : int = get_size_dict(A )
return center_crop(A , size=(size["height"], size["width"]) , data_format=A , **A )
def _A ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Any ):
return rescale(A , scale=A , data_format=A , **A )
def _A ( self : List[Any] , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : int , ):
return normalize(A , mean=A , std=A , data_format=A , **A )
def _A ( self : Optional[int] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : Dict , ):
_UpperCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : Any = size if size is not None else self.size
_UpperCAmelCase : List[str] = get_size_dict(A , default_to_square=A )
_UpperCAmelCase : Optional[int] = resample if resample is not None else self.resample
_UpperCAmelCase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : List[Any] = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : Optional[int] = get_size_dict(A )
_UpperCAmelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std
_UpperCAmelCase : List[Any] = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_UpperCAmelCase : Dict = [to_numpy_array(A ) for image in images]
if do_resize:
_UpperCAmelCase : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
_UpperCAmelCase : Optional[Any] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
_UpperCAmelCase : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
_UpperCAmelCase : Optional[Any] = [self.normalize(image=A , mean=A , std=A ) for image in images]
_UpperCAmelCase : List[str] = [to_channel_dimension_format(A , A ) for image in images]
_UpperCAmelCase : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=A , tensor_type=A )
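# Hedged usage sketch (added; not in the original file): assuming this is a standard
# Hugging Face image processor, preprocessing a PIL image would look roughly like
#   processor = lowerCamelCase_()                      # the class defined above
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224) with the default resize/crop above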
| 31 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
def UpperCamelCase_ ( _UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : int = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
_UpperCAmelCase : List[Any] = MaskFormerConfig(backbone_config=_UpperCAmelCase )
_UpperCAmelCase : Tuple = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
_UpperCAmelCase : Dict = 847
_UpperCAmelCase : Any = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
_UpperCAmelCase : Any = 150
_UpperCAmelCase : Any = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
_UpperCAmelCase : Tuple = 171
_UpperCAmelCase : Union[str, Any] = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
_UpperCAmelCase : Any = 133
_UpperCAmelCase : int = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
_UpperCAmelCase : Optional[int] = 19
_UpperCAmelCase : str = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
_UpperCAmelCase : Optional[int] = 65
_UpperCAmelCase : Tuple = "mapillary-vistas-id2label.json"
_UpperCAmelCase : List[Any] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : Tuple = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
return config
def UpperCamelCase_ ( _UpperCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def UpperCamelCase_ ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = dct.pop(_UpperCAmelCase )
_UpperCAmelCase : List[str] = val
def UpperCamelCase_ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : List[str] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_UpperCAmelCase : Optional[int] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_UpperCAmelCase : Any = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_UpperCAmelCase : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : List[str] = in_proj_weight[:dim, :]
_UpperCAmelCase : Tuple = in_proj_bias[: dim]
_UpperCAmelCase : List[Any] = in_proj_weight[
dim : dim * 2, :
]
_UpperCAmelCase : List[str] = in_proj_bias[
dim : dim * 2
]
_UpperCAmelCase : Optional[Any] = in_proj_weight[
-dim :, :
]
_UpperCAmelCase : Dict = in_proj_bias[-dim :]
# fmt: on
def UpperCamelCase_ ( _UpperCAmelCase : Dict , _UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_UpperCAmelCase : Dict = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_UpperCAmelCase : Dict = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : int = in_proj_weight[: hidden_size, :]
_UpperCAmelCase : Union[str, Any] = in_proj_bias[:config.hidden_size]
_UpperCAmelCase : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
_UpperCAmelCase : List[str] = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCAmelCase : int = in_proj_weight[-hidden_size :, :]
_UpperCAmelCase : Optional[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_UpperCAmelCase : Optional[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_UpperCAmelCase : Tuple = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : Any = in_proj_weight[: hidden_size, :]
_UpperCAmelCase : Tuple = in_proj_bias[:config.hidden_size]
_UpperCAmelCase : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
_UpperCAmelCase : Dict = in_proj_bias[hidden_size : hidden_size * 2]
_UpperCAmelCase : Optional[int] = in_proj_weight[-hidden_size :, :]
_UpperCAmelCase : Union[str, Any] = in_proj_bias[-hidden_size :]
# fmt: on
def UpperCamelCase_ ( ) -> torch.Tensor:
"""simple docstring"""
_UpperCAmelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : Any = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase_ ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : bool = False ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = get_maskformer_config(_UpperCAmelCase )
# load original state_dict
with open(_UpperCAmelCase , "rb" ) as f:
_UpperCAmelCase : Optional[int] = pickle.load(_UpperCAmelCase )
_UpperCAmelCase : Optional[int] = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_UpperCAmelCase : Any = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_swin_q_k_v(_UpperCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(_UpperCAmelCase , _UpperCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
_UpperCAmelCase : Tuple = torch.from_numpy(_UpperCAmelCase )
# load 🤗 model
_UpperCAmelCase : Union[str, Any] = MaskFormerForInstanceSegmentation(_UpperCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(_UpperCAmelCase , param.shape )
_UpperCAmelCase , _UpperCAmelCase : Any = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_UpperCAmelCase ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_UpperCAmelCase : Optional[int] = prepare_img()
if "vistas" in model_name:
_UpperCAmelCase : int = 65
elif "cityscapes" in model_name:
_UpperCAmelCase : Tuple = 65_535
else:
_UpperCAmelCase : Any = 255
_UpperCAmelCase : Optional[Any] = True if "ade" in model_name else False
_UpperCAmelCase : Optional[int] = MaskFormerImageProcessor(ignore_index=_UpperCAmelCase , reduce_labels=_UpperCAmelCase )
_UpperCAmelCase : Optional[int] = image_processor(_UpperCAmelCase , return_tensors="pt" )
_UpperCAmelCase : List[Any] = model(**_UpperCAmelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_UpperCAmelCase : Tuple = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 31 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __magic_name__ (PretrainedConfig ):
    model_type = '''distilbert'''
    attribute_map = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
    def __init__( self , vocab_size=30522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.0_2 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class __magic_name__ (OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
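        # Added note: the dynamic axes returned above tell the ONNX exporter which input
        # dimensions may vary at run time (batch size, number of choices for the
        # multiple-choice task, and sequence length).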
| 364 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCamelCase = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 69 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 69 | 1 |
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __a( PretrainedConfig ):
"""simple docstring"""
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = '''nezha'''
    def __init__( self ,vocab_size=21_128 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,max_relative_position=64 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,classifier_dropout=0.1 ,pad_token_id=0 ,bos_token_id=2 ,eos_token_id=3 ,use_cache=True ,**kwargs ,) -> Optional[Any]:
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 369 |
from collections import defaultdict
def check_anagrams( first_str , second_str ):
    '''simple docstring'''
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(''' ''' , '''''' )
    second_str = second_str.replace(''' ''' , '''''' )
    # Strings of different lengths are not anagrams
    if len(first_str ) != len(second_str ):
        return False
    # Default values for count should be 0
    count : defaultdict[str, int] = defaultdict(int )
    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str ) ):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values() )
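# Added example (comment only):
#   check_anagrams("Silent", "Listen")  -> True
#   check_anagrams("There", "Their")    -> False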
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()
    status = check_anagrams(input_a, input_b)
    print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 235 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : str =logging.get_logger(__name__)
lowerCamelCase : Any ={
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __a ( PretrainedConfig ):
    model_type = '''ibert'''
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class __a ( OnnxConfig ):
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
            ] )
| 189 |
import functools
def mincost_tickets( days , costs ):
    '''simple docstring'''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("The parameter days should be a list of integers" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("The parameter costs should be a list of three integers" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("All days elements should be greater than 0" )
    if max(days ) >= 366:
        raise ValueError("All days elements should be less than 366" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
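# Added example (comment only): with days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15],
# the minimum spend is 11: a 1-day pass on day 1, a 7-day pass covering days 4-8,
# and another 1-day pass on day 20 (2 + 7 + 2 = 11).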
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187 | 0 |
"""simple docstring"""
__A = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.6_02_17_66_34E-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.355_818,
}
def energy_conversion(from_type , to_type , value ) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg =(
            F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
            F'''Valid values are: {', '.join(ENERGY_CONVERSION )}'''
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
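# Added example (comment only):
#   energy_conversion("joule", "kilojoule", 1)      -> 0.001
#   energy_conversion("kilowatthour", "joule", 1)   -> 3_600_000.0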
if __name__ == "__main__":
import doctest
doctest.testmod()
| 341 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path , n_shave_prefix_segments=1 ) -> str:
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list , n_shave_prefix_segments=0 ):
    mapping =[]
    for old_item in old_list:
        new_item =old_item.replace("""in_layers.0""" , """norm1""" )
        new_item =new_item.replace("""in_layers.2""" , """conv1""" )
        new_item =new_item.replace("""out_layers.0""" , """norm2""" )
        new_item =new_item.replace("""out_layers.3""" , """conv2""" )
        new_item =new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
        new_item =new_item.replace("""skip_connection""" , """conv_shortcut""" )
        new_item =shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def renew_attention_paths(old_list , n_shave_prefix_segments=0 ):
    mapping =[]
    for old_item in old_list:
        new_item =old_item
        new_item =new_item.replace("""norm.weight""" , """group_norm.weight""" )
        new_item =new_item.replace("""norm.bias""" , """group_norm.bias""" )
        new_item =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
        new_item =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
        new_item =shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({"""old""": old_item, """new""": new_item} )
    return mapping
def assign_to_checkpoint(paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor =old_checkpoint[path]
            channels =old_tensor.shape[0] // 3
            target_shape =(-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads =old_tensor.shape[0] // config["""num_head_channels"""] // 3
            old_tensor =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value =old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map["""query"""]] =query.reshape(target_shape )
            checkpoint[path_map["""key"""]] =key.reshape(target_shape )
            checkpoint[path_map["""value"""]] =value.reshape(target_shape )
    for path in paths:
        new_path =path["""new"""]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
        new_path =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
        new_path =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path =new_path.replace(replacement["""old"""] , replacement["""new"""] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] =old_checkpoint[path["""old"""]][:, :, 0]
        else:
            checkpoint[new_path] =old_checkpoint[path["""old"""]]
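# The function below walks the time embedding, the input/middle/output blocks and the
# attention layers of the old checkpoint and rewrites every key into the diffusers naming
# scheme using the helpers above.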
def convert_ldm_checkpoint(checkpoint , config ):
_lowerCAmelCase ={}
_lowerCAmelCase =checkpoint["""time_embed.0.weight"""]
_lowerCAmelCase =checkpoint["""time_embed.0.bias"""]
_lowerCAmelCase =checkpoint["""time_embed.2.weight"""]
_lowerCAmelCase =checkpoint["""time_embed.2.bias"""]
_lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""]
_lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""]
_lowerCAmelCase =checkpoint["""out.0.weight"""]
_lowerCAmelCase =checkpoint["""out.0.bias"""]
_lowerCAmelCase =checkpoint["""out.2.weight"""]
_lowerCAmelCase =checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the middle blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the output blocks only
_lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
_lowerCAmelCase ={
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
for i in range(1 , __UpperCamelCase ):
_lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
_lowerCAmelCase =checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
_lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase )
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''input_blocks.{i}.1''',
"""new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''input_blocks.{i}.1.qkv.bias''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , )
_lowerCAmelCase =middle_blocks[0]
_lowerCAmelCase =middle_blocks[1]
_lowerCAmelCase =middle_blocks[2]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
_lowerCAmelCase =i // (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =i % (config["""num_res_blocks"""] + 1)
_lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]]
_lowerCAmelCase ={}
for layer in output_block_layers:
_lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
_lowerCAmelCase =[layer_name]
if len(__UpperCamelCase ) > 1:
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
_lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase )
_lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
_lowerCAmelCase =checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(__UpperCamelCase ) == 2:
_lowerCAmelCase =[]
if len(__UpperCamelCase ):
_lowerCAmelCase =renew_attention_paths(__UpperCamelCase )
_lowerCAmelCase ={
"""old""": F'''output_blocks.{i}.1''',
"""new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
_lowerCAmelCase ={
F'''output_blocks.{i}.1.qkv.bias''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , )
else:
_lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] )
_lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] )
_lowerCAmelCase =checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__A = parser.parse_args()
__A = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__A = json.loads(f.read())
__A = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__A = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
__A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
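# Illustrative invocation (file and script names are placeholders):
#   python convert_ldm_checkpoint.py --checkpoint_path model.ckpt --config_file config.json --dump_path ./converted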
| 341 | 1 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __UpperCAmelCase ( seed ):
    """simple docstring"""
    random.seed(seed )
    np.random.seed(seed )
    torch.manual_seed(seed )
    torch.cuda.manual_seed_all(seed )
# ^^ safe to call this function even if cuda is not available
class a :
def __init__( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple = 0.9_999 , __lowerCAmelCase : Any = 0.0 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Any = False , __lowerCAmelCase : int = 1.0 , __lowerCAmelCase : Dict = 2 / 3 , __lowerCAmelCase : Optional[Any] = None , __lowerCAmelCase : Dict = None , **__lowerCAmelCase : Optional[int] , ):
if isinstance(_lowerCamelCase , torch.nn.Module ):
_UpperCAmelCase = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage`""" , """1.0.0""" , _lowerCamelCase , standard_warn=_lowerCamelCase , )
_UpperCAmelCase = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase = True
if kwargs.get("""max_value""" , _lowerCamelCase ) is not None:
_UpperCAmelCase = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
deprecate("""max_value""" , """1.0.0""" , _lowerCamelCase , standard_warn=_lowerCamelCase )
_UpperCAmelCase = kwargs['''max_value''']
if kwargs.get("""min_value""" , _lowerCamelCase ) is not None:
_UpperCAmelCase = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
deprecate("""min_value""" , """1.0.0""" , _lowerCamelCase , standard_warn=_lowerCamelCase )
_UpperCAmelCase = kwargs['''min_value''']
_UpperCAmelCase = list(_lowerCamelCase )
_UpperCAmelCase = [p.clone().detach() for p in parameters]
if kwargs.get("""device""" , _lowerCamelCase ) is not None:
_UpperCAmelCase = '''The `device` argument is deprecated. Please use `to` instead.'''
deprecate("""device""" , """1.0.0""" , _lowerCamelCase , standard_warn=_lowerCamelCase )
self.to(device=kwargs["""device"""] )
_UpperCAmelCase = None
_UpperCAmelCase = decay
_UpperCAmelCase = min_decay
_UpperCAmelCase = update_after_step
_UpperCAmelCase = use_ema_warmup
_UpperCAmelCase = inv_gamma
_UpperCAmelCase = power
_UpperCAmelCase = 0
_UpperCAmelCase = None # set in `step()`
_UpperCAmelCase = model_cls
_UpperCAmelCase = model_config
@classmethod
def lowerCAmelCase_ ( cls : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : int ):
_UpperCAmelCase = model_cls.load_config(_lowerCamelCase , return_unused_kwargs=_lowerCamelCase )
_UpperCAmelCase = model_cls.from_pretrained(_lowerCamelCase )
_UpperCAmelCase = cls(model.parameters() , model_cls=_lowerCamelCase , model_config=model.config )
ema_model.load_state_dict(_lowerCamelCase )
return ema_model
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Tuple ):
if self.model_cls is None:
raise ValueError("""`save_pretrained` can only be used if `model_cls` was defined at __init__.""" )
if self.model_config is None:
raise ValueError("""`save_pretrained` can only be used if `model_config` was defined at __init__.""" )
_UpperCAmelCase = self.model_cls.from_config(self.model_config )
_UpperCAmelCase = self.state_dict()
state_dict.pop("""shadow_params""" , _lowerCamelCase )
model.register_to_config(**_lowerCamelCase )
self.copy_to(model.parameters() )
model.save_pretrained(_lowerCamelCase )
    def get_decay( self , optimization_step ):
        step = max(0 , optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value , self.decay )
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value , self.min_decay )
return cur_decay_value
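    # With use_ema_warmup the decay ramps up as 1 - (1 + step / inv_gamma) ** -power,
    # otherwise it follows (1 + step) / (10 + step); either way the value is clamped
    # into the [min_decay, decay] range before the shadow parameters are updated.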
@torch.no_grad()
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Any ):
if isinstance(_lowerCamelCase , torch.nn.Module ):
_UpperCAmelCase = (
'''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
'''Please pass the parameters of the module instead.'''
)
deprecate(
"""passing a `torch.nn.Module` to `ExponentialMovingAverage.step`""" , """1.0.0""" , _lowerCamelCase , standard_warn=_lowerCamelCase , )
_UpperCAmelCase = parameters.parameters()
_UpperCAmelCase = list(_lowerCamelCase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase = self.get_decay(self.optimization_step )
_UpperCAmelCase = decay
_UpperCAmelCase = 1 - decay
_UpperCAmelCase = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , _lowerCamelCase ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_UpperCAmelCase = deepspeed.zero.GatheredParameters(_lowerCamelCase , modifier_rank=_lowerCamelCase )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_lowerCamelCase )
def lowerCAmelCase_ ( self : Optional[int] , __lowerCAmelCase : Union[str, Any] ):
_UpperCAmelCase = list(_lowerCamelCase )
for s_param, param in zip(self.shadow_params , _lowerCamelCase ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Any=None , __lowerCAmelCase : Union[str, Any]=None ):
_UpperCAmelCase = [
p.to(device=_lowerCamelCase , dtype=_lowerCamelCase ) if p.is_floating_point() else p.to(device=_lowerCamelCase )
for p in self.shadow_params
]
def lowerCAmelCase_ ( self : int ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase_ ( self : Any , __lowerCAmelCase : List[str] ):
_UpperCAmelCase = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[int] ):
if self.temp_stored_params is None:
raise RuntimeError("""This ExponentialMovingAverage has no `store()`ed weights """ """to `restore()`""" )
for c_param, param in zip(self.temp_stored_params , _lowerCamelCase ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCAmelCase = None
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = copy.deepcopy(_lowerCamelCase )
_UpperCAmelCase = state_dict.get("""decay""" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("""Decay must be between 0 and 1""" )
_UpperCAmelCase = state_dict.get("""min_decay""" , self.min_decay )
if not isinstance(self.min_decay , _lowerCamelCase ):
raise ValueError("""Invalid min_decay""" )
_UpperCAmelCase = state_dict.get("""optimization_step""" , self.optimization_step )
if not isinstance(self.optimization_step , _lowerCamelCase ):
raise ValueError("""Invalid optimization_step""" )
_UpperCAmelCase = state_dict.get("""update_after_step""" , self.update_after_step )
if not isinstance(self.update_after_step , _lowerCamelCase ):
raise ValueError("""Invalid update_after_step""" )
_UpperCAmelCase = state_dict.get("""use_ema_warmup""" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , _lowerCamelCase ):
raise ValueError("""Invalid use_ema_warmup""" )
_UpperCAmelCase = state_dict.get("""inv_gamma""" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("""Invalid inv_gamma""" )
_UpperCAmelCase = state_dict.get("""power""" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("""Invalid power""" )
_UpperCAmelCase = state_dict.get("""shadow_params""" , _lowerCamelCase )
if shadow_params is not None:
_UpperCAmelCase = shadow_params
if not isinstance(self.shadow_params , _lowerCamelCase ):
raise ValueError("""shadow_params must be a list""" )
if not all(isinstance(_lowerCamelCase , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("""shadow_params must all be Tensors""" )
| 289 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
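# A minimal read-only fsspec filesystem over a Hugging Face dataset repository: directory
# listings are built from the repo's `siblings` metadata and files are streamed via `hf_hub_url`.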
class _snake_case ( AbstractFileSystem ):
    root_marker = ''
    protocol = 'hf-legacy' # "hf://"" is reserved for hffs
    def __init__( self , repo_info = None , token = None , **kwargs , ):
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self ):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    '''name''': hf_file.rfilename,
                    '''size''': None,
                    '''type''': '''file''',
                }
                self.dir_cache.update(
                    {
                        str(d ): {'''name''': str(d ), '''size''': None, '''type''': '''directory'''}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def _open( self , path , mode = "rb" , **kwargs , ):
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
    def info( self , path , **kwargs ):
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self , path , detail=False , **kwargs ):
        self._get_dirs()
        path = PurePosixPath(path.strip('''/''' ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('''/''' ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['''name'''] for f in out )
| 94 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ['model.decoder.embed_positions.weights']
def rename_keys( name ):
    if "emb" in name:
        name = name.replace("""emb""" , """model.decoder.embed_tokens""" )
    if "transformer" in name:
        name = name.replace("""transformer""" , """model.decoder""" )
    if "cross_attention" in name:
        name = name.replace("""cross_attention""" , """encoder_attn""" )
    if "linear1" in name:
        name = name.replace("""linear1""" , """fc1""" )
    if "linear2" in name:
        name = name.replace("""linear2""" , """fc2""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """self_attn_layer_norm""" )
    if "norm_cross" in name:
        name = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """final_layer_norm""" )
    if "out_norm" in name:
        name = name.replace("""out_norm""" , """model.decoder.layer_norm""" )
    if "linears" in name:
        name = name.replace("""linears""" , """lm_heads""" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" )
    return name
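# For example (illustrative), "emb.weight" maps to "model.decoder.embed_tokens.weight" and
# "transformer.layers.0.linear1.weight" maps to "model.decoder.layers.0.fc1.weight".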
def rename_state_dict( state_dict , hidden_size )-> Tuple[Dict, Dict]:
UpperCamelCase = list(state_dict.keys() )
UpperCamelCase = {}
for key in keys:
UpperCamelCase = state_dict.pop(__UpperCamelCase )
UpperCamelCase = rename_keys(__UpperCamelCase )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCamelCase = val[:hidden_size, :]
UpperCamelCase = val[hidden_size : 2 * hidden_size, :]
UpperCamelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCamelCase = val
else:
UpperCamelCase = val
return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint( checkpoint )-> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
UpperCamelCase = MusicGen.get_pretrained(__UpperCamelCase , device=__UpperCamelCase )
UpperCamelCase = decoder_config_from_checkpoint(__UpperCamelCase )
UpperCamelCase = fairseq_model.lm.state_dict()
UpperCamelCase ,UpperCamelCase = rename_state_dict(
__UpperCamelCase , hidden_size=decoder_config.hidden_size )
UpperCamelCase = TaEncoderModel.from_pretrained("""t5-base""" )
UpperCamelCase = EncodecModel.from_pretrained("""facebook/encodec_32khz""" )
UpperCamelCase = MusicgenForCausalLM(__UpperCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCamelCase ,UpperCamelCase = decoder.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
for key in missing_keys.copy():
if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
raise ValueError(F"Missing key(s) in state_dict: {missing_keys}" )
if len(__UpperCamelCase ) > 0:
raise ValueError(F"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
UpperCamelCase = MusicgenForConditionalGeneration(text_encoder=__UpperCamelCase , audio_encoder=__UpperCamelCase , decoder=__UpperCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__UpperCamelCase )
# check we can do a forward pass
UpperCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCamelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCamelCase = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("""Incorrect shape for logits""" )
# now construct the processor
UpperCamelCase = AutoTokenizer.from_pretrained("""t5-base""" )
UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" )
UpperCamelCase = MusicgenProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase )
# set the appropriate bos/pad token ids
UpperCamelCase = 2048
UpperCamelCase = 2048
# set other default generation config params
UpperCamelCase = int(30 * audio_encoder.config.frame_rate )
UpperCamelCase = True
UpperCamelCase = 3.0
if pytorch_dump_folder is not None:
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
logger.info(F"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
if repo_id:
logger.info(F"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(__UpperCamelCase )
processor.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
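    # Illustrative CLI call (script name is a placeholder):
    #   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small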
| 183 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-1'
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-2'
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-3'
SCREAMING_SNAKE_CASE__ = 'CompVis/stable-diffusion-v1-4'
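# The pipeline below runs one prompt through each of the four Stable Diffusion v1.x
# checkpoints listed above so their outputs can be compared side by side.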
class a_ ( DiffusionPipeline ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , ) -> int:
"""simple docstring"""
        super().__init__()
UpperCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase = StableDiffusionPipeline(
vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , requires_safety_checker=_SCREAMING_SNAKE_CASE , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def A__ ( self ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , _SCREAMING_SNAKE_CASE ) for k in self.config.keys() if not k.startswith("""_""" )}
def A__ ( self , _SCREAMING_SNAKE_CASE = "auto" ) -> Any:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
self.enable_attention_slicing(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
"""simple docstring"""
return self.pipea(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 512 , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = 7.5 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , **_SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
UpperCamelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(_SCREAMING_SNAKE_CASE )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCamelCase = self.textaimg_sda_a(
prompt=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 183 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class A ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self ):
return self._get_dummy_components()
    def get_dummy_inputs(self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
def a_ (self ) -> Tuple:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def a_ (self ) -> Optional[int]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def a_ (self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def a_ (self ) -> Union[str, Any]:
self._test_save_load_local()
def a_ (self ) -> Any:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def a_ (self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
'''simple docstring'''
def a_ (self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ (self ) -> Dict:
# if
__UpperCamelCase : Optional[Any] = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
__UpperCamelCase : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
__UpperCamelCase , __UpperCamelCase : Optional[int] = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__UpperCamelCase : Any = None
__UpperCamelCase : List[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__UpperCamelCase : List[Any] = IFImgaImgPipeline(**pipe_a.components )
__UpperCamelCase : Tuple = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__UpperCamelCase : str = IFInpaintingPipeline(**pipe_a.components )
__UpperCamelCase : List[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
__UpperCamelCase : int = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCamelCase : Dict = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="np" , )
__UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__UpperCamelCase : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
__UpperCamelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
__UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCamelCase : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__UpperCamelCase : int = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="np" , )
__UpperCamelCase : Optional[int] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__UpperCamelCase : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__UpperCamelCase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
# pipeline 1
_start_torch_memory_measurement()
__UpperCamelCase : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCamelCase : List[Any] = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="np" , )
__UpperCamelCase : str = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__UpperCamelCase : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
__UpperCamelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
__UpperCamelCase : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCamelCase : Tuple = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__UpperCamelCase : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__UpperCamelCase : Optional[Any] = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="np" , )
__UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__UpperCamelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__UpperCamelCase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
# pipeline 1
_start_torch_memory_measurement()
__UpperCamelCase : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__UpperCamelCase : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(_UpperCAmelCase )
__UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCamelCase : List[Any] = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="np" , )
__UpperCamelCase : Any = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__UpperCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
__UpperCamelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
# pipeline 2
_start_torch_memory_measurement()
__UpperCamelCase : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCamelCase : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__UpperCamelCase : str = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(_UpperCAmelCase )
__UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(_UpperCAmelCase )
__UpperCamelCase : Dict = pipe_a(
prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="np" , )
__UpperCamelCase : Dict = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__UpperCamelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__UpperCamelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
def _start_torch_memory_measurement( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 298 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )):
__UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )]
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ):
__UpperCamelCase : List[str] = []
# Maximum number of queries across batch
__UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_UpperCAmelCase ) != max_num_queries:
__UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase ))
__UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
encodings.append(_UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__UpperCamelCase : Optional[Any] = BatchEncoding()
__UpperCamelCase : Union[str, Any] = input_ids
__UpperCamelCase : List[str] = attention_mask
if query_images is not None:
__UpperCamelCase : str = BatchEncoding()
__UpperCamelCase : Any = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values
__UpperCamelCase : List[Any] = query_pixel_values
if images is not None:
__UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase : Optional[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
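    # Illustrative call (variable names are placeholders): processor(text=[["a photo of a cat"]],
    # images=image, return_tensors="pt") yields input_ids, attention_mask and pixel_values in one BatchEncoding.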
    def post_process(self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )
    def post_process_object_detection(self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )
    def post_process_image_guided_detection(self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )
    def batch_decode(self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
    def decode(self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
    def feature_extractor_class(self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
return self.image_processor_class
@property
    def feature_extractor(self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
return self.image_processor
| 298 | 1 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    doc_stride: int = field(
        default=1_2_8 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    max_query_length: int = field(
        default=6_4 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    max_answer_length: int = field(
        default=3_0 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    version_2_with_negative: bool = field(
        default=False , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    n_best_size: int = field(
        default=2_0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    lang_id: int = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    threads: int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self) -> int:
        return len(self.features)
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build the model inputs for one example
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
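
# Illustrative usage sketch (not part of the original module); the tokenizer
# checkpoint and `data_dir` below are placeholders:
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     args = SquadDataTrainingArguments(data_dir="path/to/squad", max_seq_length=384)
#     train_dataset = SquadDataset(args, tokenizer=tokenizer, mode="train")
#     example = train_dataset[0]  # dict with input_ids, attention_mask, start/end positions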
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_lowerCamelCase : str = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
TEXT = _lowerCamelCase  # alias for the reference passage defined above


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit

    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also a < 4d

    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
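
# Why the loop above works (sketch): write the arithmetic progression as
# x = a + d, y = a, z = a - d.  Then
#     x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = 4*a*d - a**2 = a * (4*d - a)
# so every solution of x**2 - y**2 - z**2 = n corresponds to a divisor a
# (`first_term`) of n for which d = (a + n / a) / 4 is an integer, and the
# bounds a > d and a < 4 * d keep x, y, z (and n) positive.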
if __name__ == "__main__":
print(F"""{solution() = }""")
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
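
# Worked example (illustrative): with gold answer "the cat sat" and prediction
# "cat sat down", normalization drops the article "the", so the token bags are
# [cat, sat] and [cat, sat, down].  Two tokens overlap, giving precision 2/3,
# recall 2/2 = 1.0 and F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8.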
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_snake_case = {}
for qid, s in scores.items():
_snake_case = na_probs[qid] > na_prob_thresh
if pred_na:
_snake_case = float(not qid_to_has_ans[qid] )
else:
_snake_case = s
return new_scores
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ):
if not qid_list:
_snake_case = len(_SCREAMING_SNAKE_CASE )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
_snake_case = len(_SCREAMING_SNAKE_CASE )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for k in new_eval:
_snake_case = new_eval[k]
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
plt.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(_SCREAMING_SNAKE_CASE )
plt.savefig(_SCREAMING_SNAKE_CASE )
plt.clf()
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
_snake_case = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : na_probs[k] )
_snake_case = 0.0
_snake_case = 1.0
_snake_case = 0.0
_snake_case = [1.0]
_snake_case = [0.0]
_snake_case = 0.0
for i, qid in enumerate(_SCREAMING_SNAKE_CASE ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
_snake_case = true_pos / float(i + 1 )
_snake_case = true_pos / float(_SCREAMING_SNAKE_CASE )
if i == len(_SCREAMING_SNAKE_CASE ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_SCREAMING_SNAKE_CASE )
recalls.append(_SCREAMING_SNAKE_CASE )
if out_image:
plot_pr_curve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return {"ap": 100.0 * avg_prec}
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if out_image_dir and not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
_snake_case = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
_snake_case = make_precision_recall_eval(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
_snake_case = make_precision_recall_eval(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
_snake_case = {k: float(_SCREAMING_SNAKE_CASE ) for k, v in qid_to_has_ans.items()}
_snake_case = make_precision_recall_eval(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , out_image=os.path.join(_SCREAMING_SNAKE_CASE , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """pr_exact""" )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """pr_f1""" )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """pr_oracle""" )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if not qid_list:
return
_snake_case = [na_probs[k] for k in qid_list]
_snake_case = np.ones_like(_SCREAMING_SNAKE_CASE ) / float(len(_SCREAMING_SNAKE_CASE ) )
plt.hist(_SCREAMING_SNAKE_CASE , weights=_SCREAMING_SNAKE_CASE , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(_SCREAMING_SNAKE_CASE , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_snake_case = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
_snake_case = num_no_ans
_snake_case = cur_score
_snake_case = 0.0
_snake_case = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : na_probs[k] )
for i, qid in enumerate(_SCREAMING_SNAKE_CASE ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
_snake_case = scores[qid]
else:
if preds[qid]:
_snake_case = -1
else:
_snake_case = 0
cur_score += diff
if cur_score > best_score:
_snake_case = cur_score
_snake_case = na_probs[qid]
return 100.0 * best_score / len(_SCREAMING_SNAKE_CASE ), best_thresh
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_snake_case, _snake_case = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_snake_case, _snake_case = find_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_snake_case = best_exact
_snake_case = exact_thresh
_snake_case = best_fa
_snake_case = fa_thresh
def __SCREAMING_SNAKE_CASE ( ):
with open(OPTS.data_file ) as f:
_snake_case = json.load(_SCREAMING_SNAKE_CASE )
_snake_case = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
_snake_case = json.load(_SCREAMING_SNAKE_CASE )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
_snake_case = json.load(_SCREAMING_SNAKE_CASE )
else:
_snake_case = {k: 0.0 for k in preds}
_snake_case = make_qid_to_has_ans(_SCREAMING_SNAKE_CASE ) # maps qid to True/False
_snake_case = [k for k, v in qid_to_has_ans.items() if v]
_snake_case = [k for k, v in qid_to_has_ans.items() if not v]
_snake_case, _snake_case = get_raw_scores(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_snake_case = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh )
_snake_case = apply_no_ans_threshold(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.na_prob_thresh )
_snake_case = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if has_ans_qids:
_snake_case = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """HasAns""" )
if no_ans_qids:
_snake_case = make_eval_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , qid_list=_SCREAMING_SNAKE_CASE )
merge_eval(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir )
histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
print(json.dumps(_SCREAMING_SNAKE_CASE , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
    main()
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
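
# Illustrative check (not part of the original module):
#     longest_common_substring("abcdxyz", "xyzabcd") -> "abcd"
# "abcd" (length 4) is the longest contiguous run shared by both strings, so it
# wins over the shorter shared run "xyz".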
if __name__ == "__main__":
import doctest
    doctest.testmod()
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
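
# Illustrative usage sketch (not part of the original module):
#
#     config = T5Config(d_model=256, num_layers=4, num_heads=4, feed_forward_proj="gated-gelu")
#     # config.is_gated_act is True and config.dense_act_fn resolves to "gelu_new"
#     # via the backwards-compatibility branch above.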
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_herbert_fast'''] = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []


def solution() -> int:
    return compute_nums(1)[0]
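
# Context (illustrative): Goldbach's other conjecture claimed every odd composite
# can be written as a prime plus twice a square, e.g. 9 = 7 + 2*1**2,
# 15 = 7 + 2*2**2, 33 = 31 + 2*1**2.  `solution()` returns the smallest odd
# composite for which no such decomposition exists, searching the odd composites
# below 100001 built above.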
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
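
# Illustrative usage (not part of the original module):
#     quick_select([2, 4, 5, 7, 899, 54, 32], 5) -> 54
# (sorted, the list is [2, 4, 5, 7, 32, 54, 899], so index 5 holds 54); the
# median of an odd-length list is quick_select(items, len(items) // 2).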
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> List[str]:
SCREAMING_SNAKE_CASE = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
SCREAMING_SNAKE_CASE = 1_28
elif "12-12" in model_name:
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = 12
elif "14-14" in model_name:
SCREAMING_SNAKE_CASE = 14
SCREAMING_SNAKE_CASE = 14
elif "16-16" in model_name:
SCREAMING_SNAKE_CASE = 16
SCREAMING_SNAKE_CASE = 16
else:
raise ValueError('Model not supported' )
SCREAMING_SNAKE_CASE = 'huggingface/label-files'
if "speech-commands" in model_name:
SCREAMING_SNAKE_CASE = 35
SCREAMING_SNAKE_CASE = 'speech-commands-v2-id2label.json'
else:
SCREAMING_SNAKE_CASE = 5_27
SCREAMING_SNAKE_CASE = 'audioset-id2label.json'
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> List[Any]:
if "module.v" in name:
SCREAMING_SNAKE_CASE = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
SCREAMING_SNAKE_CASE = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
SCREAMING_SNAKE_CASE = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
SCREAMING_SNAKE_CASE = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
SCREAMING_SNAKE_CASE = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
SCREAMING_SNAKE_CASE = name.replace('attn' , 'attention.self' )
if "norm1" in name:
SCREAMING_SNAKE_CASE = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
SCREAMING_SNAKE_CASE = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
SCREAMING_SNAKE_CASE = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
SCREAMING_SNAKE_CASE = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
SCREAMING_SNAKE_CASE = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
def lowercase (SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "qkv" in key:
SCREAMING_SNAKE_CASE = key.split('.' )
SCREAMING_SNAKE_CASE = int(key_split[3] )
SCREAMING_SNAKE_CASE = config.hidden_size
if "weight" in key:
SCREAMING_SNAKE_CASE = val[:dim, :]
SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE = val[:dim]
SCREAMING_SNAKE_CASE = val[dim : dim * 2]
SCREAMING_SNAKE_CASE = val[-dim:]
else:
SCREAMING_SNAKE_CASE = val
return orig_state_dict
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> List[Any]:
SCREAMING_SNAKE_CASE = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> Optional[int]:
SCREAMING_SNAKE_CASE = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
SCREAMING_SNAKE_CASE = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE_ )
# rename some keys
SCREAMING_SNAKE_CASE = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load 🤗 model
SCREAMING_SNAKE_CASE = ASTForAudioClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
SCREAMING_SNAKE_CASE = -4.2_67_73_93 if 'speech-commands' not in model_name else -6.84_59_78
SCREAMING_SNAKE_CASE = 4.5_68_99_74 if 'speech-commands' not in model_name else 5.5_65_45_26
SCREAMING_SNAKE_CASE = 10_24 if 'speech-commands' not in model_name else 1_28
SCREAMING_SNAKE_CASE = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
if "speech-commands" in model_name:
SCREAMING_SNAKE_CASE = load_dataset('speech_commands' , 'v0.02' , split='validation' )
SCREAMING_SNAKE_CASE = dataset[0]['audio']['array']
else:
SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torchaudio.load(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = waveform.squeeze().numpy()
SCREAMING_SNAKE_CASE = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=1_60_00 , return_tensors='pt' )
# forward pass
SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
SCREAMING_SNAKE_CASE = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
SCREAMING_SNAKE_CASE = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
SCREAMING_SNAKE_CASE = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
SCREAMING_SNAKE_CASE = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
SCREAMING_SNAKE_CASE = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
SCREAMING_SNAKE_CASE = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
SCREAMING_SNAKE_CASE = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
SCREAMING_SNAKE_CASE = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(F'MIT/{model_name}' )
feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
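
# Example invocation (illustrative; assumes this script is saved as
# convert_audio_spectrogram_transformer_original_to_pytorch.py):
#
#     python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#         --model_name ast-finetuned-audioset-10-10-0.4593 \
#         --pytorch_dump_folder_path ./ast-audioset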
"""simple docstring"""
__UpperCamelCase = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
__UpperCamelCase = frozenset(['''prompt''', '''negative_prompt'''])
__UpperCamelCase = frozenset([])
__UpperCamelCase = frozenset(['''image'''])
__UpperCamelCase = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
__UpperCamelCase = frozenset(['''image'''])
__UpperCamelCase = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
__UpperCamelCase = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
__UpperCamelCase = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
__UpperCamelCase = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
__UpperCamelCase = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
__UpperCamelCase = frozenset(['''image''', '''mask_image'''])
__UpperCamelCase = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
__UpperCamelCase = frozenset(['''example_image''', '''image''', '''mask_image'''])
__UpperCamelCase = frozenset(['''class_labels'''])
__UpperCamelCase = frozenset(['''class_labels'''])
__UpperCamelCase = frozenset(['''batch_size'''])
__UpperCamelCase = frozenset([])
__UpperCamelCase = frozenset(['''batch_size'''])
__UpperCamelCase = frozenset([])
__UpperCamelCase = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
__UpperCamelCase = frozenset(['''prompt''', '''negative_prompt'''])
__UpperCamelCase = frozenset(['''input_tokens'''])
__UpperCamelCase = frozenset(['''input_tokens'''])
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512,
        enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
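
# Illustrative usage (not part of the original module): the config simply mirrors
# the encoder/decoder hyper-parameters of the BertAbs summarization model, e.g.
#     config = BertAbsConfig(dec_layers=4, dec_hidden_size=512)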
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''')
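
# Illustrative usage (not part of the original module):
#     import torch
#     act = get_activation("gelu")   # -> nn.GELU()
#     out = act(torch.randn(2, 8))
#     get_activation("tanh")         # raises ValueError: unsupported name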
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__a: Tuple = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Replace any GenerationConfig value by its dict form so the result stays serializable.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
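
# Illustrative usage sketch (not part of the original module); `output_dir` is a placeholder:
#
#     training_args = Seq2SeqTrainingArguments(
#         output_dir="out", predict_with_generate=True, generation_max_length=64, generation_num_beams=4
#     )
#     # training_args.to_dict() stays JSON-serializable even when `generation_config`
#     # is set to a GenerationConfig instance.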
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : int = 3
lowerCAmelCase_ : Dict = (32, 32)
lowerCAmelCase_ : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a_ )
return image
@property
def lowerCamelCase ( self : List[Any] ):
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def lowerCamelCase ( self : Tuple ):
torch.manual_seed(0 )
lowerCAmelCase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def lowerCamelCase ( self : List[str] ):
torch.manual_seed(0 )
lowerCAmelCase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(a_ )
@property
def lowerCamelCase ( self : Union[str, Any] ):
def extract(*a_ : Tuple , **a_ : Tuple ):
class __lowerCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
lowerCAmelCase_ : List[str] = torch.ones([0] )
def lowerCamelCase ( self : str , a_ : Optional[int] ):
self.pixel_values.to(a_ )
return self
return Out()
return extract
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : List[Any] = self.dummy_cond_unet
lowerCAmelCase_ : List[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
lowerCAmelCase_ : List[Any] = self.dummy_vae
lowerCAmelCase_ : List[str] = self.dummy_text_encoder
lowerCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : Optional[Any] = StableDiffusionPipeline(
unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : str = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : Any = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = sd_pipe([prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase_ : str = output.images
lowerCAmelCase_ : Dict = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : str = sd_pipe(
[prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a_ , )[0]
lowerCAmelCase_ : str = image[0, -3:, -3:, -1]
lowerCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : Any = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Union[str, Any] = self.dummy_cond_unet
lowerCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=a_ )
lowerCAmelCase_ : List[Any] = self.dummy_vae
lowerCAmelCase_ : List[str] = self.dummy_text_encoder
lowerCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : List[str] = StableDiffusionPipeline(
unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Optional[Any] = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : List[Any] = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : Any = sd_pipe([prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : List[str] = torch.Generator(device=a_ ).manual_seed(0 )
lowerCAmelCase_ : Optional[int] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=a_ , )[0]
lowerCAmelCase_ : Dict = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=a_ )
assert isinstance(a_ , a_ )
assert isinstance(pipe.scheduler , a_ )
assert pipe.safety_checker is None
lowerCAmelCase_ : str = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
lowerCAmelCase_ : List[str] = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCAmelCase_ : Any = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : str = self.dummy_cond_unet
lowerCAmelCase_ : str = PNDMScheduler(skip_prk_steps=a_ )
lowerCAmelCase_ : Tuple = self.dummy_vae
lowerCAmelCase_ : Dict = self.dummy_text_encoder
lowerCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
lowerCAmelCase_ : int = unet.half()
lowerCAmelCase_ : Dict = vae.half()
lowerCAmelCase_ : List[Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : Optional[int] = StableDiffusionPipeline(
unet=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , safety_checker=a_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : List[str] = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : Optional[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a_ )
lowerCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase_ : str = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : List[str] = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
lowerCAmelCase_ : Optional[int] = 40_03_66_03_46
lowerCAmelCase_ : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
lowerCAmelCase_ : Union[str, Any] = torch.manual_seed(a_ )
lowerCAmelCase_ : Union[str, Any] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Union[str, Any] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
lowerCAmelCase_ : List[str] = torch.manual_seed(a_ )
lowerCAmelCase_ : Any = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ : Optional[Any] = output.images
lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Optional[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : str ):
lowerCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=a_ )
lowerCAmelCase_ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowerCAmelCase_ : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Union[str, Any] = "padme amidala taking a bath artwork, safe for work, no nudity"
lowerCAmelCase_ : Union[str, Any] = 27_34_97_17_55
lowerCAmelCase_ : Union[str, Any] = 7
lowerCAmelCase_ : str = torch.manual_seed(a_ )
lowerCAmelCase_ : Dict = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCAmelCase_ : Any = output.images
lowerCAmelCase_ : int = image[0, -3:, -3:, -1]
lowerCAmelCase_ : int = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
lowerCAmelCase_ : Optional[int] = torch.manual_seed(a_ )
lowerCAmelCase_ : Union[str, Any] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ : Any = output.images
lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Tuple = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
lowerCAmelCase_ : Any = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
lowerCAmelCase_ : Tuple = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
lowerCAmelCase_ : List[Any] = 10_44_35_52_34
lowerCAmelCase_ : Dict = 12
lowerCAmelCase_ : int = torch.manual_seed(a_ )
lowerCAmelCase_ : List[str] = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
lowerCAmelCase_ : int = output.images
lowerCAmelCase_ : int = image[0, -3:, -3:, -1]
lowerCAmelCase_ : int = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
lowerCAmelCase_ : int = torch.manual_seed(a_ )
lowerCAmelCase_ : Any = sd_pipe(
[prompt] , generator=a_ , guidance_scale=a_ , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowerCAmelCase_ : Optional[Any] = output.images
lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : str = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
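# Mapping from fairseq/unilm parameter-name fragments to the corresponding HF WavLM module paths;
# "*" is a placeholder for the encoder layer index that recursively_load_weights fills in below.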
lowerCAmelCase : Tuple = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCAmelCase : List[str] = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
for attribute in key.split("." ):
SCREAMING_SNAKE_CASE_: Tuple = getattr(a__ , a__ )
if weight_type is not None:
SCREAMING_SNAKE_CASE_: List[Any] = getattr(a__ , a__ ).shape
else:
SCREAMING_SNAKE_CASE_: Any = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE_: List[str] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE_: Any = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE_: List[str] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE_: Union[str, Any] = value
else:
SCREAMING_SNAKE_CASE_: Dict = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple = []
SCREAMING_SNAKE_CASE_: Dict = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE_: Optional[int] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE_: Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE_: Dict = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
SCREAMING_SNAKE_CASE_: List[Any] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE_: Any = name.split(a__ )[0].split("." )[-2]
SCREAMING_SNAKE_CASE_: str = mapped_key.replace("*" , a__ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE_: int = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
SCREAMING_SNAKE_CASE_: str = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE_: Optional[Any] = "weight"
else:
SCREAMING_SNAKE_CASE_: List[str] = None
set_recursively(a__ , a__ , a__ , a__ , a__ )
continue
if not is_used:
unused_weights.append(a__ )
logger.warning(f"Unused weights: {unused_weights}" )
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] = full_name.split("conv_layers." )[-1]
SCREAMING_SNAKE_CASE_: Optional[Any] = name.split("." )
SCREAMING_SNAKE_CASE_: Optional[int] = int(items[0] )
SCREAMING_SNAKE_CASE_: Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
SCREAMING_SNAKE_CASE_: Any = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
SCREAMING_SNAKE_CASE_: str = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
SCREAMING_SNAKE_CASE_: Dict = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
SCREAMING_SNAKE_CASE_: Tuple = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(a__ )
@torch.no_grad()
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
SCREAMING_SNAKE_CASE_: int = torch.load(a__ )
SCREAMING_SNAKE_CASE_: Optional[Any] = WavLMConfigOrig(checkpoint["cfg"] )
SCREAMING_SNAKE_CASE_: str = WavLMOrig(a__ )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] = WavLMConfig.from_pretrained(a__ )
else:
SCREAMING_SNAKE_CASE_: Dict = WavLMConfig()
SCREAMING_SNAKE_CASE_: Any = WavLMModel(a__ )
recursively_load_weights(a__ , a__ )
hf_wavlm.save_pretrained(a__ )
if __name__ == "__main__":
lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
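# Example invocation (illustrative; assumes this script is saved as convert_wavlm.py and that a fairseq
# WavLM checkpoint such as WavLM-Base.pt has been downloaded locally):
# python convert_wavlm.py --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base-converted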
| 369 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Dict = {"""vocab_file""": """vocab.txt"""}
lowerCAmelCase : List[str] = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
lowerCAmelCase : List[Any] = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
lowerCAmelCase : Tuple = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : str = VOCAB_FILES_NAMES
_UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Dict = ConvBertTokenizer
def __init__( self : Optional[Any] , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Any="[UNK]" , lowerCAmelCase__ : Optional[Any]="[SEP]" , lowerCAmelCase__ : Any="[PAD]" , lowerCAmelCase__ : Dict="[CLS]" , lowerCAmelCase__ : Dict="[MASK]" , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Dict , ):
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
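        # Compare the normalizer options serialized with the tokenizer against the ones requested here;
        # if they differ, rebuild the backend normalizer below so the two stay consistent.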
if (
normalizer_state.get("lowercase" , lowerCAmelCase__) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase__) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase__) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE_: Optional[int] = getattr(lowerCAmelCase__ , normalizer_state.pop("type"))
SCREAMING_SNAKE_CASE_: Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE_: List[str] = strip_accents
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE_: Optional[int] = normalizer_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = do_lower_case
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any]=None):
SCREAMING_SNAKE_CASE_: List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
SCREAMING_SNAKE_CASE_: Any = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
| 127 | 0 |
def xnor_gate(input_a , input_b):
    # XNOR: 1 when both inputs are equal, 0 otherwise.
    return 1 if input_a == input_b else 0
def test_xnor_gate():
    assert xnor_gate(0 , 0) == 1
    assert xnor_gate(0 , 1) == 0
    assert xnor_gate(1 , 0) == 0
    assert xnor_gate(1 , 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 137 |
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
__UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s
__UpperCAmelCase = 3e8 # unit of c : m * s^-1
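# Formula used below (ideal parallel plates): force = (pi^2 * h_bar * c * area) / (240 * distance^4);
# each branch of the function rearranges it to solve for whichever of force, area or distance is passed as 0.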
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]:
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if force < 0:
raise ValueError("""Magnitude of force can not be negative""" )
if distance < 0:
raise ValueError("""Distance can not be negative""" )
if area < 0:
raise ValueError("""Area can not be negative""" )
if force == 0:
lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
2_4_0 * (distance) ** 4
)
return {"force": force}
elif area == 0:
lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
lowerCAmelCase_ :Any = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("""One and only one argument must be 0""" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__snake_case : Optional[Any] ={
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCAmelCase__ ( lowerCamelCase_ : int):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : Any):
'''simple docstring'''
if args.student_type == "roberta":
lowerCAmelCase__ : Optional[int] = False
elif args.student_type == "gpt2":
lowerCAmelCase__ : Tuple = False
def lowerCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : List[str]):
'''simple docstring'''
if args.student_type == "roberta":
lowerCAmelCase__ : Dict = False
def lowerCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase__ : Any = argparse.ArgumentParser(description='''Training''')
parser.add_argument('''--force''' ,action='''store_true''' ,help='''Overwrite dump_path if it already exists.''')
parser.add_argument(
'''--dump_path''' ,type=lowerCamelCase_ ,required=lowerCamelCase_ ,help='''The output directory (log, checkpoints, parameters, etc.)''')
parser.add_argument(
'''--data_file''' ,type=lowerCamelCase_ ,required=lowerCamelCase_ ,help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' ,)
parser.add_argument(
'''--student_type''' ,type=lowerCamelCase_ ,choices=['''distilbert''', '''roberta''', '''gpt2'''] ,required=lowerCamelCase_ ,help='''The student type (DistilBERT, RoBERTa).''' ,)
parser.add_argument('''--student_config''' ,type=lowerCamelCase_ ,required=lowerCamelCase_ ,help='''Path to the student configuration.''')
parser.add_argument(
'''--student_pretrained_weights''' ,default=lowerCamelCase_ ,type=lowerCamelCase_ ,help='''Load student initialization checkpoint.''')
parser.add_argument(
'''--teacher_type''' ,choices=['''bert''', '''roberta''', '''gpt2'''] ,required=lowerCamelCase_ ,help='''Teacher type (BERT, RoBERTa).''')
parser.add_argument('''--teacher_name''' ,type=lowerCamelCase_ ,required=lowerCamelCase_ ,help='''The teacher model.''')
parser.add_argument('''--temperature''' ,default=2.0 ,type=lowerCamelCase_ ,help='''Temperature for the softmax temperature.''')
parser.add_argument(
'''--alpha_ce''' ,default=0.5 ,type=lowerCamelCase_ ,help='''Linear weight for the distillation loss. Must be >=0.''')
parser.add_argument(
'''--alpha_mlm''' ,default=0.0 ,type=lowerCamelCase_ ,help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' ,)
parser.add_argument('''--alpha_clm''' ,default=0.5 ,type=lowerCamelCase_ ,help='''Linear weight for the CLM loss. Must be >=0.''')
parser.add_argument('''--alpha_mse''' ,default=0.0 ,type=lowerCamelCase_ ,help='''Linear weight of the MSE loss. Must be >=0.''')
parser.add_argument(
'''--alpha_cos''' ,default=0.0 ,type=lowerCamelCase_ ,help='''Linear weight of the cosine embedding loss. Must be >=0.''')
parser.add_argument(
'''--mlm''' ,action='''store_true''' ,help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''')
parser.add_argument(
'''--mlm_mask_prop''' ,default=0.15 ,type=lowerCamelCase_ ,help='''Proportion of tokens for which we need to make a prediction.''' ,)
parser.add_argument('''--word_mask''' ,default=0.8 ,type=lowerCamelCase_ ,help='''Proportion of tokens to mask out.''')
parser.add_argument('''--word_keep''' ,default=0.1 ,type=lowerCamelCase_ ,help='''Proportion of tokens to keep.''')
parser.add_argument('''--word_rand''' ,default=0.1 ,type=lowerCamelCase_ ,help='''Proportion of tokens to randomly replace.''')
parser.add_argument(
'''--mlm_smoothing''' ,default=0.7 ,type=lowerCamelCase_ ,help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' ,)
parser.add_argument('''--token_counts''' ,type=lowerCamelCase_ ,help='''The token counts in the data_file for MLM.''')
parser.add_argument(
        '''--restrict_ce_to_mask''' ,action='''store_true''' ,help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' ,)
parser.add_argument(
'''--freeze_pos_embs''' ,action='''store_true''' ,help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' ,)
parser.add_argument(
'''--freeze_token_type_embds''' ,action='''store_true''' ,help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' ,)
parser.add_argument('''--n_epoch''' ,type=lowerCamelCase_ ,default=3 ,help='''Number of pass on the whole dataset.''')
parser.add_argument('''--batch_size''' ,type=lowerCamelCase_ ,default=5 ,help='''Batch size (for each process).''')
parser.add_argument(
'''--group_by_size''' ,action='''store_false''' ,help='''If true, group sequences that have similar length into the same batch. Default is true.''' ,)
parser.add_argument(
'''--gradient_accumulation_steps''' ,type=lowerCamelCase_ ,default=50 ,help='''Gradient accumulation for larger training batches.''' ,)
parser.add_argument('''--warmup_prop''' ,default=0.05 ,type=lowerCamelCase_ ,help='''Linear warmup proportion.''')
parser.add_argument('''--weight_decay''' ,default=0.0 ,type=lowerCamelCase_ ,help='''Weight decay if we apply some.''')
parser.add_argument('''--learning_rate''' ,default=5E-4 ,type=lowerCamelCase_ ,help='''The initial learning rate for Adam.''')
parser.add_argument('''--adam_epsilon''' ,default=1E-6 ,type=lowerCamelCase_ ,help='''Epsilon for Adam optimizer.''')
parser.add_argument('''--max_grad_norm''' ,default=5.0 ,type=lowerCamelCase_ ,help='''Max gradient norm.''')
parser.add_argument('''--initializer_range''' ,default=0.02 ,type=lowerCamelCase_ ,help='''Random initialization range.''')
parser.add_argument(
'''--fp16''' ,action='''store_true''' ,help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' ,)
parser.add_argument(
'''--fp16_opt_level''' ,type=lowerCamelCase_ ,default='''O1''' ,help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) ,)
parser.add_argument('''--n_gpu''' ,type=lowerCamelCase_ ,default=1 ,help='''Number of GPUs in the node.''')
parser.add_argument('''--local_rank''' ,type=lowerCamelCase_ ,default=-1 ,help='''Distributed training - Local rank''')
parser.add_argument('''--seed''' ,type=lowerCamelCase_ ,default=56 ,help='''Random seed''')
parser.add_argument('''--log_interval''' ,type=lowerCamelCase_ ,default=500 ,help='''Tensorboard logging interval.''')
parser.add_argument('''--checkpoint_interval''' ,type=lowerCamelCase_ ,default=4000 ,help='''Checkpoint interval.''')
lowerCAmelCase__ : str = parser.parse_args()
sanity_checks(lowerCamelCase_)
# ARGS #
init_gpu_params(lowerCamelCase_)
set_seed(lowerCamelCase_)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''')
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""")
# SAVE PARAMS #
logger.info(f"""Param: {args}""")
with open(os.path.join(args.dump_path ,'''parameters.json''') ,'''w''') as f:
json.dump(vars(lowerCamelCase_) ,lowerCamelCase_ ,indent=4)
git_log(args.dump_path)
lowerCAmelCase__ : Optional[Any] = MODEL_CLASSES[args.student_type]
lowerCAmelCase__ : Optional[int] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
lowerCAmelCase__ : int = teacher_tokenizer_class.from_pretrained(args.teacher_name)
lowerCAmelCase__ : int = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
lowerCAmelCase__ : Optional[int] = tokenizer.all_special_tokens.index(lowerCamelCase_)
lowerCAmelCase__ : Optional[Any] = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""")
lowerCAmelCase__ : Tuple = special_tok_ids
lowerCAmelCase__ : int = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file ,'''rb''') as fp:
lowerCAmelCase__ : Any = pickle.load(lowerCamelCase_)
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""")
with open(args.token_counts ,'''rb''') as fp:
lowerCAmelCase__ : Optional[Any] = pickle.load(lowerCamelCase_)
lowerCAmelCase__ : str = np.maximum(lowerCamelCase_ ,1) ** -args.mlm_smoothing
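        # The smoothing above (counts ** -mlm_smoothing) boosts the masking probability of rare tokens;
        # the loop below zeroes the weights of the special tokens so they are never selected for masking.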
for idx in special_tok_ids.values():
lowerCAmelCase__ : List[Any] = 0.0 # do not predict special tokens
lowerCAmelCase__ : Optional[Any] = torch.from_numpy(lowerCamelCase_)
else:
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Union[str, Any] = LmSeqsDataset(params=lowerCamelCase_ ,data=lowerCamelCase_)
logger.info('''Data loader created.''')
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""")
lowerCAmelCase__ : List[str] = student_config_class.from_pretrained(args.student_config)
lowerCAmelCase__ : str = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""")
lowerCAmelCase__ : Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights ,config=lowerCamelCase_)
else:
lowerCAmelCase__ : int = student_model_class(lowerCamelCase_)
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""")
logger.info('''Student loaded.''')
# TEACHER #
lowerCAmelCase__ : Any = teacher_model_class.from_pretrained(args.teacher_name ,output_hidden_states=lowerCamelCase_)
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""")
logger.info(f"""Teacher loaded from {args.teacher_name}.""")
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(lowerCamelCase_ ,lowerCamelCase_)
if args.freeze_token_type_embds:
freeze_token_type_embeddings(lowerCamelCase_ ,lowerCamelCase_)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowerCAmelCase__ : List[Any] = Distiller(
params=lowerCamelCase_ ,dataset=lowerCamelCase_ ,token_probs=lowerCamelCase_ ,student=lowerCamelCase_ ,teacher=lowerCamelCase_)
distiller.train()
logger.info('''Let\'s go get some drinks.''')
if __name__ == "__main__":
main()
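# Example invocation (illustrative; assumes this script is saved as train.py and that the binarized data
# and token-count pickles already exist; all flags below are defined in the argparse setup above):
# python train.py --force --dump_path serialization_dir/my_distillation \
#     --data_file data/binarized_text.bert-base-uncased.pickle \
#     --token_counts data/token_counts.bert-base-uncased.pickle \
#     --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 --mlm_mask_prop 0.15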
| 360 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowerCAmelCase__ ( lowerCamelCase_ : ndarray):
'''simple docstring'''
return np.dot(lowerCamelCase_ ,lowerCamelCase_)
class lowerCamelCase__ :
'''simple docstring'''
def __init__(self ,*,
__lowerCamelCase = np.inf ,__lowerCamelCase = "linear" ,__lowerCamelCase = 0.0 ,) -> None:
"""simple docstring"""
lowerCAmelCase__ : Any = regularization
lowerCAmelCase__ : str = gamma
if kernel == "linear":
lowerCAmelCase__ : Dict = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma ,(float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
lowerCAmelCase__ : Optional[Any] = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
lowerCAmelCase__ : List[str] = f"""Unknown kernel: {kernel}"""
raise ValueError(__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> float:
"""simple docstring"""
return np.dot(__lowerCamelCase ,__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> float:
"""simple docstring"""
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ) -> None:
"""simple docstring"""
lowerCAmelCase__ : str = observations
lowerCAmelCase__ : Optional[int] = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
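        # Concretely, to_minimize below evaluates
        #   L(l) = 1/2 * sum_n sum_m l_n * l_m * y_n * y_m * K(x_n, x_m) - sum_n l_n
        # and scipy minimizes it subject to 0 <= l_n <= C (Bounds) and sum_n l_n * y_n = 0 (LinearConstraint).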
((lowerCAmelCase__) , ) : List[str] = np.shape(__lowerCamelCase )
def to_minimize(__lowerCamelCase ) -> float:
lowerCAmelCase__ : List[str] = 0
((lowerCAmelCase__) , ) : str = np.shape(__lowerCamelCase )
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] ,observations[j] )
)
return 1 / 2 * s - sum(__lowerCamelCase )
lowerCAmelCase__ : List[str] = LinearConstraint(__lowerCamelCase ,0 ,0 )
lowerCAmelCase__ : List[str] = Bounds(0 ,self.regularization )
lowerCAmelCase__ : int = minimize(
__lowerCamelCase ,np.ones(__lowerCamelCase ) ,bounds=__lowerCamelCase ,constraints=[ly_contraint] ).x
lowerCAmelCase__ : List[Any] = l_star
# calculating mean offset of separation plane to points
lowerCAmelCase__ : Optional[Any] = 0
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] ,observations[j] )
lowerCAmelCase__ : Dict = s / n
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> int:
"""simple docstring"""
lowerCAmelCase__ : str = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] ,__lowerCamelCase )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 | 0 |
class A__ :
def __init__( self , A_ , A_=None , A_=None ):
'''simple docstring'''
UpperCamelCase : List[str] = data
UpperCamelCase : Optional[int] = previous
UpperCamelCase : Optional[Any] = next_node
def __str__( self ):
'''simple docstring'''
return F"""{self.data}"""
def __UpperCamelCase( self ):
'''simple docstring'''
return self.data
def __UpperCamelCase( self ):
'''simple docstring'''
return self.next
def __UpperCamelCase( self ):
'''simple docstring'''
return self.previous
class A__ :
def __init__( self , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = head
def __iter__( self ):
'''simple docstring'''
return self
def __UpperCamelCase( self ):
'''simple docstring'''
if not self.current:
raise StopIteration
else:
UpperCamelCase : Optional[Any] = self.current.get_data()
UpperCamelCase : List[str] = self.current.get_next()
return value
class A__ :
def __init__( self ):
'''simple docstring'''
UpperCamelCase : Any = None # First node in list
UpperCamelCase : Optional[int] = None # Last node in list
def __str__( self ):
'''simple docstring'''
UpperCamelCase : str = self.head
UpperCamelCase : Tuple = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase : Dict = current.get_next()
return " ".join(str(A_ ) for node in nodes )
def __contains__( self , A_ ):
'''simple docstring'''
UpperCamelCase : int = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase : Tuple = current.get_next()
return False
def __iter__( self ):
'''simple docstring'''
return LinkedListIterator(self.head )
def __UpperCamelCase( self ):
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def __UpperCamelCase( self ):
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if self.head is None:
UpperCamelCase : List[Any] = node
UpperCamelCase : Union[str, Any] = node
else:
self.insert_before_node(self.head , A_ )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if self.head is None:
self.set_head(A_ )
else:
self.insert_after_node(self.tail , A_ )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = Node(A_ )
if self.head is None:
self.set_head(A_ )
else:
self.set_tail(A_ )
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[int] = node
UpperCamelCase : List[Any] = node.previous
if node.get_previous() is None:
UpperCamelCase : str = node_to_insert
else:
UpperCamelCase : Dict = node_to_insert
UpperCamelCase : Optional[Any] = node_to_insert
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = node
UpperCamelCase : Optional[Any] = node.next
if node.get_next() is None:
UpperCamelCase : List[str] = node_to_insert
else:
UpperCamelCase : int = node_to_insert
UpperCamelCase : Union[str, Any] = node_to_insert
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = 1
UpperCamelCase : Tuple = Node(A_ )
UpperCamelCase : Optional[int] = self.head
while node:
if current_position == position:
self.insert_before_node(A_ , A_ )
return
current_position += 1
UpperCamelCase : Union[str, Any] = node.next
self.insert_after_node(self.tail , A_ )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase : int = node.get_next()
raise Exception("Node not found" )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if (node := self.get_node(A_ )) is not None:
if node == self.head:
UpperCamelCase : Optional[Any] = self.head.get_next()
if node == self.tail:
UpperCamelCase : Dict = self.tail.get_previous()
self.remove_node_pointers(A_ )
@staticmethod
def __UpperCamelCase( A_ ):
'''simple docstring'''
if node.get_next():
UpperCamelCase : List[str] = node.previous
if node.get_previous():
UpperCamelCase : Dict = node.next
UpperCamelCase : Any = None
UpperCamelCase : Optional[Any] = None
def __UpperCamelCase( self ):
'''simple docstring'''
return self.head is None
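# Design note: the list keeps both `head` and `tail` pointers, so appending at either end is O(1);
# positional insertion and deletion walk the chain from `head` and rewire the `previous`/`next`
# links of the neighbouring nodes.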
def A_ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
__lowerCamelCase = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
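# With the _LazyModule wrapper above, the torch-dependent symbols listed in _import_structure are only
# imported on first attribute access, which keeps the initial package import lightweight.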
| 162 | 0 |
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> np.ndarray:
# For applying gaussian function for each element in matrix.
lowercase__ : Optional[Any] = math.sqrt(__lowerCamelCase )
lowercase__ : List[Any] = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> np.ndarray:
lowercase__ : List[str] = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
lowercase__ : Tuple = np.zeros((kernel_size, kernel_size) )
for i in range(0 , __lowerCamelCase ):
for j in range(0 , __lowerCamelCase ):
lowercase__ : Tuple = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> np.ndarray:
lowercase__ : Tuple = np.zeros(img.shape )
lowercase__ : Any = get_gauss_kernel(__lowerCamelCase , __lowerCamelCase )
lowercase__ , lowercase__ : List[Any] = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
lowercase__ : List[str] = get_slice(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowercase__ : str = img_s - img_s[kernel_size // 2, kernel_size // 2]
lowercase__ : Optional[Any] = vec_gaussian(__lowerCamelCase , __lowerCamelCase )
lowercase__ : List[str] = np.multiply(__lowerCamelCase , __lowerCamelCase )
lowercase__ : List[Any] = np.multiply(__lowerCamelCase , __lowerCamelCase )
lowercase__ : List[str] = np.sum(__lowerCamelCase ) / np.sum(__lowerCamelCase )
lowercase__ : List[Any] = val
return imga
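# Note: for each pixel p, the loops above compute a weighted average of its kernel_size x kernel_size
# neighbourhood, out(p) = sum_q w(p, q) * I(q) / sum_q w(p, q), where w(p, q) multiplies the precomputed
# spatial Gaussian (get_gauss_kernel) with an intensity Gaussian of the difference I(q) - I(p).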
def __UpperCAmelCase ( __lowerCamelCase ) -> tuple:
lowercase__ : Optional[int] = args[1] if args[1:] else '''../image_data/lena.jpg'''
lowercase__ : Optional[Any] = float(args[2] ) if args[2:] else 1.0
lowercase__ : Optional[Any] = float(args[3] ) if args[3:] else 1.0
if args[4:]:
lowercase__ : Optional[int] = int(args[4] )
lowercase__ : Optional[Any] = kernel_size + abs(kernel_size % 2 - 1 )
else:
lowercase__ : Optional[Any] = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ = parse_args(sys.argv)
lowerCAmelCase_ = cva.imread(filename, 0)
cva.imshow('input image', img)
lowerCAmelCase_ = img / 255
lowerCAmelCase_ = out.astype('float32')
lowerCAmelCase_ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowerCAmelCase_ = out * 255
lowerCAmelCase_ = np.uinta(out)
cva.imshow('output image', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 302 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowerCAmelCase_ = _symbol_database.Default()
lowerCAmelCase_ = _descriptor_pool.Default().AddSerializedFile(
B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
lowerCAmelCase_ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
lowerCAmelCase_ = None
lowerCAmelCase_ = B'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
lowerCAmelCase_ = 45
lowerCAmelCase_ = 1_581
lowerCAmelCase_ = 1_517
lowerCAmelCase_ = 1_570
lowerCAmelCase_ = 1_584
lowerCAmelCase_ = 1_793
lowerCAmelCase_ = 1_795
lowerCAmelCase_ = 1_916
lowerCAmelCase_ = 1_864
lowerCAmelCase_ = 1_905
lowerCAmelCase_ = 1_919
lowerCAmelCase_ = 2_429
lowerCAmelCase_ = 2_208
lowerCAmelCase_ = 2_418
lowerCAmelCase_ = 2_323
lowerCAmelCase_ = 2_407
# @@protoc_insertion_point(module_scope)
| 302 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class __UpperCamelCase :
A_ = field(
metadata={"help": "The output directory where the model will be written."} , )
A_ = field(
metadata={
"help": (
"The encoder model checkpoint for weights initialization."
"Don't set if you want to train an encoder model from scratch."
)
} , )
A_ = field(
metadata={
"help": (
"The decoder model checkpoint for weights initialization."
"Don't set if you want to train a decoder model from scratch."
)
} , )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
A_ = field(
default=lowerCAmelCase_ , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def lowerCamelCase ():
__a : Dict = HfArgumentParser((ModelArguments,) )
((__a) , ) : Tuple = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
__a : int = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
__a : List[str] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
__a : Dict = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
__a : Optional[Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
__a : Dict = True
__a : List[Any] = True
__a : List[str] = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=_SCREAMING_SNAKE_CASE , decoder_config=_SCREAMING_SNAKE_CASE , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
__a : Dict = decoder_config.decoder_start_token_id
__a : Any = decoder_config.pad_token_id
if decoder_start_token_id is None:
__a : List[Any] = decoder_config.bos_token_id
if pad_token_id is None:
__a : Union[str, Any] = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
__a : List[str] = decoder_config.eos_token_id
__a : int = decoder_start_token_id
__a : List[str] = pad_token_id
__a : Tuple = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
__a : str = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
__a : List[str] = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
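# Example invocation (illustrative; assumes this script is saved as create_model.py; the flag names come
# from the ModelArguments dataclass fields above):
# python create_model.py --output_dir ./vit-gpt2 \
#     --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#     --decoder_model_name_or_path gpt2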
| 27 |
"""simple docstring"""
def solution(limit = 100_0000 ):
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 301 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class lowerCamelCase ( A_ ):
UpperCAmelCase__ : Optional[Any] = "swin2sr"
UpperCAmelCase__ : int = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(self : List[str] , _A : int=6_4 , _A : str=1 , _A : Dict=3 , _A : Dict=1_8_0 , _A : Tuple=[6, 6, 6, 6, 6, 6] , _A : List[str]=[6, 6, 6, 6, 6, 6] , _A : str=8 , _A : Tuple=2.0 , _A : List[Any]=True , _A : List[str]=0.0 , _A : Optional[int]=0.0 , _A : List[str]=0.1 , _A : Optional[int]="gelu" , _A : str=False , _A : int=0.02 , _A : int=1E-5 , _A : Union[str, Any]=2 , _A : Optional[int]=1.0 , _A : List[Any]="1conv" , _A : List[str]="pixelshuffle" , **_A : Union[str, Any] , ) -> List[str]:
super().__init__(**_A )
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = embed_dim
snake_case = depths
snake_case = len(_A )
snake_case = num_heads
snake_case = window_size
snake_case = mlp_ratio
snake_case = qkv_bias
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = drop_path_rate
snake_case = hidden_act
snake_case = use_absolute_embeddings
snake_case = layer_norm_eps
snake_case = initializer_range
snake_case = upscale
snake_case = img_range
snake_case = resi_connection
snake_case = upsampler
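# Minimal usage sketch (illustrative; assumes the obfuscated class above corresponds to Swin2SRConfig):
#   config = Swin2SRConfig()
#   print(config.upscale, config.upsampler)  # -> 2 pixelshuffle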
| 362 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
_A = logging.get_logger(__name__)
_A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_A = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
_A = {
"bert-base-uncased": 5_12,
"bert-large-uncased": 5_12,
"bert-base-cased": 5_12,
"bert-large-cased": 5_12,
"bert-base-multilingual-uncased": 5_12,
"bert-base-multilingual-cased": 5_12,
"bert-base-chinese": 5_12,
"bert-base-german-cased": 5_12,
"bert-large-uncased-whole-word-masking": 5_12,
"bert-large-cased-whole-word-masking": 5_12,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_12,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_12,
"bert-base-cased-finetuned-mrpc": 5_12,
"bert-base-german-dbmdz-cased": 5_12,
"bert-base-german-dbmdz-uncased": 5_12,
"TurkuNLP/bert-base-finnish-cased-v1": 5_12,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_12,
"wietsedv/bert-base-dutch-cased": 5_12,
}
_A = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class lowerCamelCase ( A_ ):
UpperCAmelCase__ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : int = BertTokenizer
def __init__(self : Union[str, Any] , _A : Union[str, Any]=None , _A : Optional[int]=None , _A : List[str]=True , _A : List[Any]="[UNK]" , _A : Union[str, Any]="[SEP]" , _A : List[Any]="[PAD]" , _A : List[Any]="[CLS]" , _A : Union[str, Any]="[MASK]" , _A : int=True , _A : Tuple=None , **_A : Optional[int] , ) -> int:
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , )
snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , _A ) != do_lower_case
or normalizer_state.get("strip_accents" , _A ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _A ) != tokenize_chinese_chars
):
snake_case = getattr(_A , normalizer_state.pop("type" ) )
snake_case = do_lower_case
snake_case = strip_accents
snake_case = tokenize_chinese_chars
snake_case = normalizer_class(**_A )
snake_case = do_lower_case
def UpperCAmelCase(self : str , _A : Union[str, Any] , _A : int=None ) -> Any:
snake_case = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase(self : Optional[Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase(self : Union[str, Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
snake_case = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
| 137 | 0 |
'''simple docstring'''
import numpy
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Any , _lowerCAmelCase : numpy.ndarray , _lowerCAmelCase : numpy.ndarray ):
A = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
A = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
A = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
A = numpy.random.rand(3 , 1 )
# Real output values provided.
A = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
A = numpy.zeros(output_array.shape )
def A (self : Tuple ):
A = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
A = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
A = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def A (self : List[Any] ):
A = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
A = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
A = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def A (self : Union[str, Any] , _lowerCAmelCase : numpy.ndarray , _lowerCAmelCase : int , _lowerCAmelCase : bool ):
for iteration in range(1 , iterations + 1 ):
A = self.feedforward()
self.back_propagation()
if give_loss:
A = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def A (self : Union[str, Any] , _lowerCAmelCase : numpy.ndarray ):
A = input_arr
A = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
A = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
A = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def __a ( UpperCAmelCase ) ->numpy.ndarray:
"""simple docstring"""
return 1 / (1 + numpy.exp(-value ))
def __a ( UpperCAmelCase ) ->numpy.ndarray:
"""simple docstring"""
return (value) * (1 - (value))
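# Worked note on the derivative used in back-propagation above (pure math, no new code):
# for the logistic function s(x) = 1 / (1 + exp(-x)), ds/dx = s(x) * (1 - s(x)).
# The helper therefore expects the already-activated value, e.g. s(0) = 0.5 gives a
# derivative of 0.5 * (1 - 0.5) = 0.25.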
def __a ( ) ->int:
"""simple docstring"""
A = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
A = TwoHiddenLayerNeuralNetwork(
input_array=UpperCAmelCase , output_array=UpperCAmelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=UpperCAmelCase , iterations=10 , give_loss=UpperCAmelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 258 |
'''simple docstring'''
def __a ( UpperCAmelCase , UpperCAmelCase ) ->float:
"""simple docstring"""
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
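# Worked example (approximate handbook values, purely illustrative): water has a bulk
# modulus of roughly 2.15e9 Pa and a density of about 998 kg/m^3, so the formula above
# gives (2.15e9 / 998) ** 0.5, about 1.47e3 m/s, close to the commonly quoted ~1480 m/s
# speed of sound in water.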
| 258 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __magic_name__ ( unittest.TestCase ):
UpperCAmelCase =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCAmelCase ( self , snake_case , snake_case , snake_case) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] =TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case)
return generator, ["Something to write", "Something else"]
def lowerCAmelCase ( self , snake_case , snake_case) -> List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple =generator('Something there')
self.assertEqual(snake_case , [{'generated_text': ANY(snake_case)}])
        # These are encoder-decoder models, so they don't just append to the incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there'))
_UpperCAmelCase : Optional[int] =generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case)
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case)}, {'generated_text': ANY(snake_case)}],
[{'generated_text': ANY(snake_case)}, {'generated_text': ANY(snake_case)}],
] , )
_UpperCAmelCase : Optional[int] =generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case)
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case)}, {'generated_text': ANY(snake_case)}],
[{'generated_text': ANY(snake_case)}, {'generated_text': ANY(snake_case)}],
] , )
with self.assertRaises(snake_case):
generator(4)
@require_torch
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt')
# do_sample=False necessary for reproducibility
_UpperCAmelCase : Optional[int] =generator('Something there' , do_sample=snake_case)
self.assertEqual(snake_case , [{'generated_text': ''}])
_UpperCAmelCase : List[str] =3
_UpperCAmelCase : str =generator(
'Something there' , num_return_sequences=snake_case , num_beams=snake_case , )
_UpperCAmelCase : str =[
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(snake_case , snake_case)
_UpperCAmelCase : str =generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case)
self.assertEqual(
snake_case , [
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
] , )
_UpperCAmelCase : str =generator.model.config.eos_token_id
_UpperCAmelCase : Optional[int] ='<pad>'
_UpperCAmelCase : Dict =generator(
['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , )
self.assertEqual(
snake_case , [
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
[
{'generated_token_ids': ANY(torch.Tensor)},
{'generated_token_ids': ANY(torch.Tensor)},
],
] , )
@require_tf
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Any =pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf')
# do_sample=False necessary for reproducibility
_UpperCAmelCase : Dict =generator('Something there' , do_sample=snake_case)
self.assertEqual(snake_case , [{'generated_text': ''}])
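# A minimal usage sketch of the pipeline exercised above (the tiny test checkpoint is
# swapped here for an illustrative real model; the generated text will vary by model):
#
#   from transformers import pipeline
#   generator = pipeline("text2text-generation", model="t5-small")
#   generator("translate English to German: How old are you?", do_sample=False)
#   # -> a list of dicts with a "generated_text" key, as asserted in the tests above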
| 242 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowercase =logging.getLogger(__name__)
class __magic_name__ ( lowerCAmelCase ):
def __init__( self , snake_case , snake_case , snake_case , snake_case=None) -> List[Any]:
'''simple docstring'''
super().__init__(
snake_case , question_encoder_tokenizer=snake_case , generator_tokenizer=snake_case , index=snake_case , init_retrieval=snake_case , )
_UpperCAmelCase : Union[str, Any] =None
def lowerCAmelCase ( self , snake_case) -> Optional[Any]:
'''simple docstring'''
logger.info('initializing retrieval')
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('dist initialized')
# needs to be set manually
_UpperCAmelCase : Optional[Any] =self._infer_socket_ifname()
# avoid clash with the NCCL port
_UpperCAmelCase : Optional[int] =str(distributed_port + 1)
_UpperCAmelCase : Any =dist.new_group(ranks=snake_case , backend='gloo')
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('dist not initialized / main')
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group)
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
return dist.get_rank(group=self.process_group) == 0
def lowerCAmelCase ( self , snake_case , snake_case , snake_case=torch.floataa) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict =torch.empty(snake_case , dtype=snake_case)
dist.scatter(snake_case , src=0 , scatter_list=snake_case , group=self.process_group)
return target_tensor
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict =psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_UpperCAmelCase : str =next((addr for addr in addrs if addr.startswith('e')) , snake_case)
return ifname
def lowerCAmelCase ( self , snake_case , snake_case) -> Tuple[np.ndarray, List[dict]]:
'''simple docstring'''
# single GPU training
if not dist.is_initialized():
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] =self._main_retrieve(snake_case , snake_case)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(snake_case)
# distributed training
_UpperCAmelCase : Optional[int] =dist.get_world_size(group=self.process_group)
# gather logic
_UpperCAmelCase : str =None
if self._is_main():
_UpperCAmelCase : Union[str, Any] =[torch.empty(question_hidden_states.shape , dtype=torch.floataa) for _ in range(snake_case)]
dist.gather(torch.tensor(snake_case) , dst=0 , gather_list=snake_case , group=self.process_group)
# scatter logic
_UpperCAmelCase : Optional[Any] =question_hidden_states.shape[0]
_UpperCAmelCase : List[Any] =[]
_UpperCAmelCase : Any =[]
if self._is_main():
assert len(snake_case) == world_size
_UpperCAmelCase , _UpperCAmelCase : Tuple =self._main_retrieve(torch.cat(snake_case).numpy() , snake_case)
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] =torch.tensor(snake_case), torch.tensor(snake_case)
_UpperCAmelCase : List[str] =self._chunk_tensor(snake_case , snake_case)
_UpperCAmelCase : Union[str, Any] =self._chunk_tensor(snake_case , snake_case)
_UpperCAmelCase : int =self._scattered(snake_case , [n_queries, n_docs] , target_type=torch.intaa)
_UpperCAmelCase : Dict =self._scattered(snake_case , [n_queries, n_docs, question_hidden_states.shape[1]])
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(snake_case)
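# Control-flow sketch of retrieve() above (descriptive only, no new behaviour):
#   rank 0 : dist.gather(question states) -> _main_retrieve on the full index
#            -> _chunk_tensor per worker  -> dist.scatter(doc ids), dist.scatter(doc embeds)
#   rank >0: joins the same gather (send only) and scatter (receive only)
# so every worker ends up with its own slice of (retrieved_doc_embeds, doc_ids, doc_dicts).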
| 242 | 1 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__lowercase : List[Any] = 'bart'
__lowercase : Union[str, Any] = True
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def lowerCamelCase ():
if LOAD_DENSE_INDEX:
__a : List[Any] = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
__a : Dict = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
__a : Optional[int] = qar_model.eval()
else:
__a , __a : str = (None, None)
if MODEL_TYPE == "bart":
__a : Union[str, Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
__a : int = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
__a : Optional[Any] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
__a : str = sas_model.eval()
else:
__a , __a : Tuple = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def lowerCamelCase ():
if LOAD_DENSE_INDEX:
__a : Optional[Any] = faiss.StandardGpuResources()
__a : Dict = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
__a : int = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
__a : int = faiss.IndexFlatIP(128 )
__a : Any = faiss.index_cpu_to_gpu(_SCREAMING_SNAKE_CASE , 1 , _SCREAMING_SNAKE_CASE )
wikiaab_gpu_index_flat.add(_SCREAMING_SNAKE_CASE ) # TODO fix for larger GPU
else:
__a , __a : str = (None, None)
__a : Optional[int] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def lowerCamelCase ():
__a : Dict = datasets.load_dataset('eli5' , name='LFQA_reddit' )
__a : Dict = elia['train_eli5']
__a : Optional[int] = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
__a : str = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_SCREAMING_SNAKE_CASE )
return (elia_train, eli5_train_q_index)
__lowercase , __lowercase , __lowercase : Any = load_indexes()
__lowercase , __lowercase , __lowercase , __lowercase : Dict = load_models()
__lowercase , __lowercase : int = load_train_data()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str]=10 ):
__a : Optional[int] = embed_questions_for_retrieval([question] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a , __a : Union[str, Any] = eli5_train_q_index.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Any = [elia_train[int(_SCREAMING_SNAKE_CASE )] for i in I[0]]
return nn_examples
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str="wiki40b" , _SCREAMING_SNAKE_CASE : List[str]="dense" , _SCREAMING_SNAKE_CASE : Any=10 ):
if source == "none":
__a , __a : Any = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__a , __a : str = query_qa_dense_index(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__a , __a : Union[str, Any] = query_es_index(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index_name='english_wiki40b_snippets_100w' , n_results=_SCREAMING_SNAKE_CASE , )
__a : Dict = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
__a : Any = 'question: {} context: {}'.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _SCREAMING_SNAKE_CASE : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _SCREAMING_SNAKE_CASE : None),
} )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict=64 , _SCREAMING_SNAKE_CASE : Dict=256 , _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : Tuple=2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.9_5 , _SCREAMING_SNAKE_CASE : str=0.8 ):
with torch.no_grad():
__a : Union[str, Any] = qa_sas_generate(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=_SCREAMING_SNAKE_CASE , min_len=_SCREAMING_SNAKE_CASE , max_len=_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , temp=_SCREAMING_SNAKE_CASE , top_p=_SCREAMING_SNAKE_CASE , top_k=_SCREAMING_SNAKE_CASE , max_input_length=1_024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
__lowercase : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
__lowercase : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowercase : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
__lowercase : Dict = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
__lowercase : Union[str, Any] = st.sidebar.checkbox('Demo options')
if demo_options:
__lowercase : Any = st.sidebar.selectbox(
'',
action_list,
index=3,
)
__lowercase : Tuple = action_list.index(action_st)
__lowercase : Tuple = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
__lowercase : List[Any] = show_type == 'Show full text of passages'
else:
__lowercase : int = 3
__lowercase : str = True
__lowercase : Tuple = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
__lowercase : List[Any] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
__lowercase : str = 'wiki40b'
__lowercase : List[Any] = 'dense'
__lowercase : Dict = 'beam'
__lowercase : Optional[int] = 2
__lowercase : List[str] = 64
__lowercase : Tuple = 2_56
__lowercase : List[str] = None
__lowercase : Tuple = None
__lowercase : List[Any] = st.sidebar.checkbox('Generation options')
if generate_options:
__lowercase : Optional[Any] = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
__lowercase : List[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
__lowercase : Tuple = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
__lowercase : int = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
__lowercase : Any = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowercase : Dict = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__lowercase : Union[str, Any] = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__lowercase : List[str] = None
# start main text
__lowercase : int = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
__lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__lowercase : Any = st.text_input('Enter your question here:', '')
else:
__lowercase : Any = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
__lowercase , __lowercase : Optional[int] = make_support(question, source=wiki_source, method='dense', n_results=10)
__lowercase , __lowercase : List[Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
__lowercase : Optional[int] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__lowercase : str = support_list[:10]
__lowercase : Optional[int] = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
__lowercase , __lowercase : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__lowercase , __lowercase : int = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
__lowercase : str = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
__lowercase : Any = res[1].strip()
if sec_titles == "":
__lowercase : List[str] = '[{}]({})'.format(res[0], wiki_url)
else:
__lowercase : Union[str, Any] = sec_titles.split(' & ')
__lowercase : str = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
__lowercase : str = find_nearest_training(question)
__lowercase : Optional[int] = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
__lowercase : Any = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
__lowercase : List[Any] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
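# Note on running this demo: the st.* calls above require the Streamlit runtime, so the
# app is launched with `streamlit run <path_to_this_script>` (path is a placeholder)
# rather than invoked directly with the plain Python interpreter.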
| 27 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCamelCase : Tuple =False
class __a ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCamelCase__ : List[Any] = torch.manual_seed(0 )
UpperCamelCase__ : List[Any] = pipe(
image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
UpperCamelCase__ : Union[str, Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ : Optional[Any] = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 189 | 0 |
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ ( _UpperCamelCase ):
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_UpperCAmelCase : Union[str, Any] = 5
# Realm tok
_UpperCAmelCase : Union[str, Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCAmelCase : int = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(A , exist_ok=A )
_UpperCAmelCase : Dict = os.path.join(A , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCAmelCase : Any = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(A , exist_ok=A )
def snake_case_ ( self : Optional[Any] ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def snake_case_ ( self : str ):
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : Tuple ):
_UpperCAmelCase : Tuple = RealmConfig(num_block_records=self.num_block_records )
return config
def snake_case_ ( self : Union[str, Any] ):
_UpperCAmelCase : int = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def snake_case_ ( self : Tuple ):
_UpperCAmelCase : Any = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
] , dtype=A , )
return block_records
def snake_case_ ( self : str ):
_UpperCAmelCase : Union[str, Any] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : Optional[int] = self.get_config()
_UpperCAmelCase : Any = self.get_dummy_retriever()
_UpperCAmelCase : str = retriever.tokenizer
_UpperCAmelCase : Tuple = np.array([0, 3] , dtype="long" )
_UpperCAmelCase : Any = tokenizer(["Test question"] ).input_ids
_UpperCAmelCase : Any = tokenizer(
["the fourth"] , add_special_tokens=A , return_token_type_ids=A , return_attention_mask=A , ).input_ids
_UpperCAmelCase : Dict = config.reader_seq_len
_UpperCAmelCase : Union[str, Any] = retriever(
A , A , answer_ids=A , max_length=A , return_tensors="np" )
self.assertEqual(len(A ) , 2 )
self.assertEqual(len(A ) , 2 )
self.assertEqual(len(A ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Tuple = self.get_config()
_UpperCAmelCase : List[str] = self.get_dummy_retriever()
_UpperCAmelCase : Any = retriever.tokenizer
_UpperCAmelCase : Union[str, Any] = np.array([0, 3, 5] , dtype="long" )
_UpperCAmelCase : Optional[Any] = tokenizer(["Test question"] ).input_ids
_UpperCAmelCase : List[str] = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=A , return_token_type_ids=A , return_attention_mask=A , ).input_ids
_UpperCAmelCase : str = config.reader_seq_len
_UpperCAmelCase : Tuple = retriever(
A , A , answer_ids=A , max_length=A , return_tensors="np" )
self.assertEqual([False, True, True] , A )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , A )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , A )
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : Dict = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
_UpperCAmelCase : Dict = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
_UpperCAmelCase : List[str] = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
_UpperCAmelCase : str = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
| 364 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : int = 'owlvit_text_model'
def __init__( self : int , A : int=4_9_4_0_8 , A : Optional[Any]=5_1_2 , A : Optional[Any]=2_0_4_8 , A : str=1_2 , A : int=8 , A : Tuple=1_6 , A : List[Any]="quick_gelu" , A : Tuple=1e-5 , A : Union[str, Any]=0.0 , A : List[Any]=0.02 , A : str=1.0 , A : str=0 , A : List[str]=4_9_4_0_6 , A : str=4_9_4_0_7 , **A : Optional[Any] , ):
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
_UpperCAmelCase : Union[str, Any] = vocab_size
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : List[Any] = intermediate_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : str = num_attention_heads
_UpperCAmelCase : List[str] = max_position_embeddings
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Tuple = layer_norm_eps
_UpperCAmelCase : List[str] = attention_dropout
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : List[Any] = initializer_factor
@classmethod
def snake_case_ ( cls : Any , A : Union[str, os.PathLike] , **A : Dict ):
cls._set_token_in_kwargs(A )
_UpperCAmelCase , _UpperCAmelCase : List[str] = cls.get_config_dict(A , **A )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
_UpperCAmelCase : int = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : Tuple = 'owlvit_vision_model'
def __init__( self : Union[str, Any] , A : Optional[int]=7_6_8 , A : int=3_0_7_2 , A : List[str]=1_2 , A : List[str]=1_2 , A : Optional[int]=3 , A : Optional[int]=7_6_8 , A : str=3_2 , A : Tuple="quick_gelu" , A : Dict=1e-5 , A : Optional[int]=0.0 , A : List[Any]=0.02 , A : str=1.0 , **A : Tuple , ):
super().__init__(**A )
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : Dict = num_attention_heads
_UpperCAmelCase : Optional[Any] = num_channels
_UpperCAmelCase : Union[str, Any] = image_size
_UpperCAmelCase : Dict = patch_size
_UpperCAmelCase : List[str] = hidden_act
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : Any = attention_dropout
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Tuple = initializer_factor
@classmethod
def snake_case_ ( cls : Optional[int] , A : Union[str, os.PathLike] , **A : int ):
cls._set_token_in_kwargs(A )
_UpperCAmelCase , _UpperCAmelCase : Dict = cls.get_config_dict(A , **A )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
_UpperCAmelCase : Tuple = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : List[str] = 'owlvit'
__SCREAMING_SNAKE_CASE : Optional[Any] = True
def __init__( self : Optional[Any] , A : Dict=None , A : Tuple=None , A : Optional[Any]=5_1_2 , A : Optional[Any]=2.6_592 , A : int=True , **A : Tuple , ):
super().__init__(**A )
if text_config is None:
_UpperCAmelCase : List[Any] = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
_UpperCAmelCase : Tuple = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
_UpperCAmelCase : str = OwlViTTextConfig(**A )
_UpperCAmelCase : int = OwlViTVisionConfig(**A )
_UpperCAmelCase : Optional[Any] = projection_dim
_UpperCAmelCase : str = logit_scale_init_value
_UpperCAmelCase : Optional[Any] = return_dict
_UpperCAmelCase : str = 1.0
@classmethod
def snake_case_ ( cls : Dict , A : Union[str, os.PathLike] , **A : Any ):
cls._set_token_in_kwargs(A )
_UpperCAmelCase , _UpperCAmelCase : str = cls.get_config_dict(A , **A )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
@classmethod
def snake_case_ ( cls : Optional[int] , A : Dict , A : Dict , **A : Optional[Any] ):
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : int = text_config
_UpperCAmelCase : Dict = vision_config
return cls.from_dict(A , **A )
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : str = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Optional[int] = self.text_config.to_dict()
_UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
_UpperCAmelCase : List[Any] = self.__class__.model_type
return output
class UpperCAmelCase_ ( _UpperCamelCase ):
@property
def snake_case_ ( self : List[str] ):
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def snake_case_ ( self : Optional[int] ):
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def snake_case_ ( self : str ):
return 1e-4
def snake_case_ ( self : str , A : "ProcessorMixin" , A : int = -1 , A : int = -1 , A : Optional["TensorType"] = None , ):
_UpperCAmelCase : Optional[Any] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=A , seq_length=A , framework=A )
_UpperCAmelCase : Union[str, Any] = super().generate_dummy_inputs(
processor.image_processor , batch_size=A , framework=A )
return {**text_input_dict, **image_input_dict}
@property
def snake_case_ ( self : List[Any] ):
return 1_4
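# A minimal construction sketch (all values illustrative; upstream class name OwlViTConfig
# assumed for the composite config defined above): the sub-configs can be passed as plain
# keyword dicts, and either one may be omitted to fall back to the defaults logged in
# __init__.
#
#   cfg = OwlViTConfig(
#       text_config={"vocab_size": 49408, "hidden_size": 512},
#       vision_config={"hidden_size": 768, "patch_size": 32},
#       projection_dim=512,
#   )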
| 202 | 0 |
from __future__ import annotations
_UpperCAmelCase : Union[str, Any] ='''#'''
class snake_case__:
'''simple docstring'''
def __init__( self ) -> Dict:
lowerCAmelCase_ : dict = {}
def lowercase_ ( self , __lowercase ) -> Union[str, Any]:
lowerCAmelCase_ : int = self._trie
for char in text:
if char not in trie:
lowerCAmelCase_ : Union[str, Any] = {}
lowerCAmelCase_ : List[Any] = trie[char]
lowerCAmelCase_ : Tuple = True
def lowercase_ ( self , __lowercase ) -> Any:
lowerCAmelCase_ : Union[str, Any] = self._trie
for char in prefix:
if char in trie:
lowerCAmelCase_ : int = trie[char]
else:
return []
return self._elements(lowercase_ )
def lowercase_ ( self , __lowercase ) -> Optional[Any]:
lowerCAmelCase_ : Optional[Any] = []
for c, v in d.items():
lowerCAmelCase_ : int = [''' '''] if c == END else [(c + s) for s in self._elements(lowercase_ )]
result.extend(lowercase_ )
return tuple(lowercase_ )
_UpperCAmelCase : Union[str, Any] =Trie()
_UpperCAmelCase : Optional[Any] =('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
trie.insert_word(word)
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
lowerCAmelCase_ : Union[str, Any] = trie.find_word(_a )
return tuple(string + word for word in suffixes )
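# Illustrative result for the words inserted above: autocomplete_using_trie("de") returns
# a tuple such as ("depart ", "detergent ", "deer ", "deal "); each completion keeps the
# trailing space produced by the END marker, and the exact ordering follows the insertion
# order of the words.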
def lowerCAmelCase ( )-> Any:
print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
| 262 |
"""simple docstring"""
import sys
lowercase__ : Dict = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def __lowercase ( _a ):
snake_case_ : List[Any] = 1
for digit in s:
product *= int(_a )
return product
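# Worked micro-example (illustrative input, not the 1000-digit N above): the digit-product
# helper maps "1234" to 1 * 2 * 3 * 4 = 24; the search below slides a 13-character window
# over N and keeps the largest such product.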
def __lowercase ( _a = N ):
snake_case_ : Optional[int] = -sys.maxsize - 1
snake_case_ : str = n[:13]
snake_case_ : List[Any] = 13
while cur_index < len(_a ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
snake_case_ : int = substr[1:] + n[cur_index]
cur_index += 1
else:
snake_case_ : Optional[Any] = max(_a , str_eval(_a ) )
snake_case_ : Any = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
| 264 | 0 |
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : str = logging.get_logger(__name__)
__A : Optional[Any] = {
'b0': efficientnet.EfficientNetBa,
'b1': efficientnet.EfficientNetBa,
'b2': efficientnet.EfficientNetBa,
'b3': efficientnet.EfficientNetBa,
'b4': efficientnet.EfficientNetBa,
'b5': efficientnet.EfficientNetBa,
'b6': efficientnet.EfficientNetBa,
'b7': efficientnet.EfficientNetBa,
}
__A : List[str] = {
'b0': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 2_24,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 2_40,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 14_08,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 2_60,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 15_36,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 3_00,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 17_92,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 3_80,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 20_48,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 4_56,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 23_04,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 5_28,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 25_60,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 6_00,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = EfficientNetConfig()
A = CONFIG_MAP[model_name]["hidden_dim"]
A = CONFIG_MAP[model_name]["width_coef"]
A = CONFIG_MAP[model_name]["depth_coef"]
A = CONFIG_MAP[model_name]["image_size"]
A = CONFIG_MAP[model_name]["dropout_rate"]
A = CONFIG_MAP[model_name]["dw_padding"]
A = "huggingface/label-files"
A = "imagenet-1k-id2label.json"
A = 1_000
A = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="dataset" ) , "r" ) )
A = {int(lowercase__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
A = "http://images.cocodataset.org/val2017/000000039769.jpg"
A = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = CONFIG_MAP[model_name]["image_size"]
A = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=lowercase__ , )
return preprocessor
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
A = sorted(set(lowercase__ ) )
A = len(lowercase__ )
A = {b: str(lowercase__ ) for b, i in zip(lowercase__ , range(lowercase__ ) )}
A = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
A = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
A = {}
for item in rename_keys:
if item[0] in original_param_names:
A = "efficientnet." + item[1]
A = "classifier.weight"
A = "classifier.bias"
return key_mapping
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
A = key_mapping[key]
if "_conv" in key and "kernel" in key:
A = torch.from_numpy(lowercase__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
A = torch.from_numpy(lowercase__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
A = torch.from_numpy(np.transpose(lowercase__ ) )
else:
A = torch.from_numpy(lowercase__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase__ )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
"""simple docstring"""
A = model_classes[model_name](
include_top=lowercase__ , weights="imagenet" , input_tensor=lowercase__ , input_shape=lowercase__ , pooling=lowercase__ , classes=1_000 , classifier_activation="softmax" , )
A = original_model.trainable_variables
A = original_model.non_trainable_variables
A = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
A = param.numpy()
A = list(tf_params.keys() )
# Load HuggingFace model
A = get_efficientnet_config(lowercase__ )
A = EfficientNetForImageClassification(lowercase__ ).eval()
A = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
A = rename_keys(lowercase__ )
replace_params(lowercase__ , lowercase__ , lowercase__ )
# Initialize preprocessor and preprocess input image
A = convert_image_processor(lowercase__ )
A = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
A = hf_model(**lowercase__ )
A = outputs.logits.detach().numpy()
# Original model inference
A = False
A = CONFIG_MAP[model_name]["image_size"]
A = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
A = image.img_to_array(lowercase__ )
A = np.expand_dims(lowercase__ , axis=0 )
A = original_model.predict(lowercase__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase__ , lowercase__ , atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase__ ):
os.mkdir(lowercase__ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase__ )
preprocessor.save_pretrained(lowercase__ )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
A = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(lowercase__ )
hf_model.push_to_hub(lowercase__ )
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
__A : List[str] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
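# Typical invocation (script name and output path illustrative; the flags are the ones
# defined by the argparse block above):
#
#   python convert_efficientnet_to_pytorch.py \
#       --model_name b0 \
#       --pytorch_dump_folder_path hf_model \
#       --save_model
#
# Adding --push_to_hub additionally uploads the converted weights and image processor.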
| 355 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( lowercase__ = 600_851_475_143 ):
"""simple docstring"""
try:
A = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
A = 2
A = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
A = i
while n % i == 0:
A = n // i
i += 1
return int(lowercase__ )
if __name__ == "__main__":
print(f"""{solution() = }""")
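# Worked example (the classic Project Euler 3 illustration): 13195 = 5 * 7 * 13 * 29, so
# its largest prime factor is 29; the default argument above targets 600851475143.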
| 57 | 0 |
"""simple docstring"""
def _A (__a = 1_00_00_00 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = 1
SCREAMING_SNAKE_CASE_ : str = 1
SCREAMING_SNAKE_CASE_ : List[str] = {1: 1}
for inputa in range(2 , _lowerCamelCase ):
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : int = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = (3 * number) + 1
counter += 1
if inputa not in counters:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = counter
if counter > pre_counter:
SCREAMING_SNAKE_CASE_ : List[str] = inputa
SCREAMING_SNAKE_CASE_ : str = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
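# Worked example (the classic Project Euler 14 illustration): starting from 13 the chain is
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, i.e. ten terms; the counters dict
# above memoises each chain length so later starting numbers can stop as soon as they reach
# a previously seen value.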
| 91 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Dict:
_lowerCAmelCase : List[Any] = torch.exp(_lowerCamelCase )
_lowerCAmelCase : List[Any] = torch.sum(_lowerCamelCase ,dim=1 ) # sum of exp(x_i)
_lowerCAmelCase : Dict = torch.sum(x * exp_x ,dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_lowerCamelCase ) - B / A
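# Derivation sketch for the entropy helper above: with p_i = exp(x_i) / A and
# A = sum_j exp(x_j),
#   H = -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log(A)) = log(A) - (sum_i x_i * exp(x_i)) / A,
# which is exactly log(A) - B / A as computed, i.e. the Shannon entropy of softmax(x) per row.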
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : int = config.output_attentions
_lowerCAmelCase : Any = config.output_hidden_states
_lowerCAmelCase : List[Any] = nn.ModuleList([BertLayer(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : Any = nn.ModuleList([BertHighway(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : str = [-1 for _ in range(config.num_hidden_layers )]
def __A ( self , a__ ):
if (type(a__ ) is float) or (type(a__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase : Tuple = x
else:
_lowerCAmelCase : Optional[int] = x
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
_lowerCAmelCase : Any = ()
_lowerCAmelCase : Optional[int] = ()
_lowerCAmelCase : List[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase : str = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[str] = layer_module(
a__ , a__ , head_mask[i] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase : Dict = all_attentions + (layer_outputs[1],)
_lowerCAmelCase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
_lowerCAmelCase : Optional[Any] = self.highway[i](a__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase : Tuple = highway_exit[0]
_lowerCAmelCase : Any = entropy(a__ )
_lowerCAmelCase : Optional[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a__ , i + 1 )
else:
_lowerCAmelCase : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase : List[Any] = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Any = outputs + (all_attentions,)
_lowerCAmelCase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config
_lowerCAmelCase : Tuple = BertEmbeddings(a__ )
_lowerCAmelCase : Tuple = DeeBertEncoder(a__ )
_lowerCAmelCase : List[str] = BertPooler(a__ )
self.init_weights()
def __A ( self ):
self.encoder.init_highway_pooler(self.pooler )
def __A ( self ):
return self.embeddings.word_embeddings
def __A ( self , a__ ):
_lowerCAmelCase : Dict = value
def __A ( self , a__ ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a__ )
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_lowerCAmelCase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_lowerCAmelCase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : List[Any] = torch.ones(a__ , device=a__ )
if encoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = torch.ones(a__ , device=a__ )
if token_type_ids is None:
_lowerCAmelCase : Dict = torch.zeros(a__ , dtype=torch.long , device=a__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(a__ , a__ , a__ )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase : Tuple = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[int] = self.get_head_mask(a__ , self.config.num_hidden_layers )
_lowerCAmelCase : Dict = self.embeddings(
input_ids=a__ , position_ids=a__ , token_type_ids=a__ , inputs_embeds=a__ )
_lowerCAmelCase : Union[str, Any] = self.encoder(
a__ , attention_mask=a__ , head_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : Dict = encoder_outputs[0]
_lowerCAmelCase : Union[str, Any] = self.pooler(a__ )
_lowerCAmelCase : Dict = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
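# Exception used for control flow: raised by the encoder to abort the forward pass at an
# early-exit layer, carrying the partial outputs and the (1-based) exit layer index.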
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ ):
_lowerCAmelCase : str = message
_lowerCAmelCase : str = exit_layer # start from 1!
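# A single highway off-ramp: pools one layer's hidden states, then applies dropout and a
# linear classifier to produce per-layer logits.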
class __A ( nn.Module ):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Any = BertPooler(a__ )
_lowerCAmelCase : str = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def __A ( self , a__ ):
# Pooler
_lowerCAmelCase : Tuple = encoder_outputs[0]
_lowerCAmelCase : int = self.pooler(a__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase : Optional[int] = bmodel_output[1]
_lowerCAmelCase : Tuple = self.dropout(a__ )
_lowerCAmelCase : Dict = self.classifier(a__ )
return logits, pooled_output
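# DeeBERT with a sequence-classification head: computes the usual loss on the final logits
# and, when training the highways, a separate loss for every early-exit classifier.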
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[str] = config.num_labels
_lowerCAmelCase : Optional[Any] = config.num_hidden_layers
_lowerCAmelCase : str = DeeBertModel(a__ )
_lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
_lowerCAmelCase : Dict = self.num_layers
try:
_lowerCAmelCase : str = self.bert(
a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase : Any = outputs[1]
_lowerCAmelCase : Optional[int] = self.dropout(a__ )
_lowerCAmelCase : List[str] = self.classifier(a__ )
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Tuple = e.message
_lowerCAmelCase : int = e.exit_layer
_lowerCAmelCase : Union[str, Any] = outputs[0]
if not self.training:
_lowerCAmelCase : Tuple = entropy(a__ )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Tuple = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : Dict = highway_exit[0]
if not self.training:
highway_logits_all.append(a__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : List[Any] = MSELoss()
_lowerCAmelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Optional[int] = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a__ )
if train_highway:
_lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 44 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : Optional[int] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 70 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
_lowerCAmelCase : Any = (3, 9, -11, 0, 7, 5, 1, -1)
_lowerCAmelCase : Any = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __magic_name__ :
"""simple docstring"""
__UpperCamelCase = 42
__UpperCamelCase = 42
class __magic_name__ :
"""simple docstring"""
def __init__( self :str , snake_case :Iterable[int] ):
'''simple docstring'''
A_ : Node | None = None
for i in sorted(snake_case , reverse=snake_case ):
A_ : str = Node(snake_case , self.head )
def __iter__( self :Any ):
'''simple docstring'''
A_ : List[Any] = self.head
while node:
yield node.data
A_ : Optional[int] = node.next_node
def __len__( self :Tuple ):
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self :Tuple ):
'''simple docstring'''
return " -> ".join([str(snake_case ) for node in self] )
def __snake_case ( _lowerCAmelCase : SortedLinkedList , _lowerCAmelCase : SortedLinkedList ) -> SortedLinkedList:
return SortedLinkedList(list(_lowerCAmelCase ) + list(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : int = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 70 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A : str = logging.get_logger(__name__)
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
__lowerCAmelCase = 1024
__lowerCAmelCase = 4096
__lowerCAmelCase = 24
__lowerCAmelCase = 16
__lowerCAmelCase = [5, 11, 17, 23]
__lowerCAmelCase = [256, 512, 1024, 1024]
__lowerCAmelCase = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
__lowerCAmelCase = 768
__lowerCAmelCase = [1, 1, 1, 0.5]
__lowerCAmelCase = [256, 512, 768, 768]
__lowerCAmelCase = 150
__lowerCAmelCase = 16
__lowerCAmelCase = (1, 384, 384)
__lowerCAmelCase = False
__lowerCAmelCase = "project"
if "ade" in checkpoint_url:
__lowerCAmelCase = True
__lowerCAmelCase = 768
__lowerCAmelCase = [1, 1, 1, 0.5]
__lowerCAmelCase = 150
__lowerCAmelCase = 16
__lowerCAmelCase = "huggingface/label-files"
__lowerCAmelCase = "ade20k-id2label.json"
__lowerCAmelCase = json.load(open(cached_download(hf_hub_url(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" ) ) , "r" ) )
__lowerCAmelCase = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = [1, 150, 480, 480]
return config, expected_shape
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
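# Translate a parameter name from the original DPT/timm checkpoint into the naming scheme
# used by the Hugging Face DPT implementation.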
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__lowerCAmelCase = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
__lowerCAmelCase = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
__lowerCAmelCase = name.replace("patch_embed" , "" )
if "pos_embed" in name:
__lowerCAmelCase = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
__lowerCAmelCase = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
__lowerCAmelCase = name.replace("proj" , "projection" )
if "blocks" in name:
__lowerCAmelCase = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
__lowerCAmelCase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__lowerCAmelCase = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
__lowerCAmelCase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
__lowerCAmelCase = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
__lowerCAmelCase = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
__lowerCAmelCase = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
__lowerCAmelCase = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
__lowerCAmelCase = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
__lowerCAmelCase = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
__lowerCAmelCase = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
__lowerCAmelCase = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__lowerCAmelCase = name.replace(f"refinenet{layer_idx}" , f"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
__lowerCAmelCase = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
__lowerCAmelCase = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
__lowerCAmelCase = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
__lowerCAmelCase = name.replace("conv1" , "convolution1" )
if "conv2" in name:
__lowerCAmelCase = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__lowerCAmelCase = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
__lowerCAmelCase = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
__lowerCAmelCase = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
__lowerCAmelCase = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__lowerCAmelCase = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
__lowerCAmelCase = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
__lowerCAmelCase = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
__lowerCAmelCase = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
__lowerCAmelCase = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
__lowerCAmelCase = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
__lowerCAmelCase = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
__lowerCAmelCase = name.replace("pretrained" , "dpt" )
if "bn" in name:
__lowerCAmelCase = name.replace("bn" , "batch_norm" )
if "head" in name:
__lowerCAmelCase = name.replace("head" , "head.head" )
if "encoder.norm" in name:
__lowerCAmelCase = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
__lowerCAmelCase = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
__lowerCAmelCase = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
__lowerCAmelCase = name.replace(".." , "." )
if "stem.conv" in name:
__lowerCAmelCase = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
__lowerCAmelCase = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
__lowerCAmelCase = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
__lowerCAmelCase = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
__lowerCAmelCase = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
__lowerCAmelCase = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
__lowerCAmelCase = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
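# The original checkpoint stores query/key/value as one fused qkv matrix per layer;
# split it into the separate query, key and value weights and biases.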
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
__lowerCAmelCase = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[: config.hidden_size, :]
__lowerCAmelCase = in_proj_bias[: config.hidden_size]
__lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCAmelCase = in_proj_bias[-config.hidden_size :]
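# Fetch a small COCO image, used below to run a quick forward pass on the converted model.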
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = get_dpt_config(_UpperCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
__lowerCAmelCase = torch.load(_UpperCamelCase , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(_UpperCamelCase )
# rename keys
for key in state_dict.copy().keys():
__lowerCAmelCase = state_dict.pop(_UpperCamelCase )
__lowerCAmelCase = val
# read in qkv matrices
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase )
# load HuggingFace model
__lowerCAmelCase = DPTForSemanticSegmentation(_UpperCamelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
model.eval()
# Check outputs on an image
__lowerCAmelCase = 480 if "ade" in checkpoint_url else 384
__lowerCAmelCase = DPTImageProcessor(size=_UpperCamelCase )
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(_UpperCamelCase , return_tensors="pt" )
# forward pass
__lowerCAmelCase = model(**_UpperCamelCase ).logits if "ade" in checkpoint_url else model(**_UpperCamelCase ).predicted_depth
if show_prediction:
__lowerCAmelCase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=_UpperCamelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCamelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
A : str = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 57 | '''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
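# Thin wrapper around a DeepSpeed config supplied as a dict, a path to a JSON file, or a
# base64-encoded JSON string; exposes helpers to query the ZeRO stage and offload settings.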
class __magic_name__ :
def __init__( self : str , lowercase_ : Dict ):
if isinstance(lowercase_ , lowercase_ ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
lowercase_ : List[Any] = deepcopy(lowercase_ )
elif os.path.exists(lowercase_ ):
with io.open(lowercase_ , """r""" , encoding="""utf-8""" ) as f:
lowercase_ : Union[str, Any] = json.load(lowercase_ )
else:
try:
lowercase_ : int = baseaa.urlsafe_baadecode(lowercase_ ).decode("""utf-8""" )
lowercase_ : str = json.loads(lowercase_ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
lowercase_ : Any = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
lowercase_ : Tuple = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowercase_ : str = False
if self.is_zeroa() or self.is_zeroa():
lowercase_ : Dict = set(["""cpu""", """nvme"""] )
lowercase_ : List[Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowercase_ : Tuple = True
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Any ):
lowercase_ : Optional[Any] = self.config
# find the config node of interest if it exists
lowercase_ : Tuple = ds_key_long.split(""".""" )
lowercase_ : Union[str, Any] = nodes.pop()
for node in nodes:
lowercase_ : List[str] = config.get(lowercase_ )
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] , lowercase_ : List[str]=None ):
lowercase_ , lowercase_ : List[Any] = self.find_config_node(lowercase_ )
if config is None:
return default
return config.get(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : int=False ):
lowercase_ : int = self.config
# find the config node of interest if it exists
lowercase_ : Dict = ds_key_long.split(""".""" )
for node in nodes:
lowercase_ : List[Any] = config
lowercase_ : Dict = config.get(lowercase_ )
if config is None:
if must_exist:
raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Tuple ):
lowercase_ : str = self.get_value(lowercase_ )
return False if value is None else bool(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Union[str, Any] = self.get_value(lowercase_ )
return False if value is None else not bool(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : Any ):
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return self._offload
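# Wraps a DeepSpeed engine so that a single backward() call runs the engine's backward pass
# followed by engine.step(), keeping the surrounding training loop framework-agnostic.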
class __magic_name__ :
def __init__( self : Any , lowercase_ : Union[str, Any] ):
lowercase_ : Any = engine
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : int , **lowercase_ : str ):
# runs backpropagation and handles mixed precision
self.engine.backward(lowercase_ , **lowercase_ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Optional[Any] , lowercase_ : Tuple ):
super().__init__(lowercase_ , device_placement=lowercase_ , scaler=lowercase_ )
lowercase_ : Any = hasattr(self.optimizer , """overflow""" )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Tuple=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Tuple ):
super().__init__(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class __magic_name__ :
def __init__( self : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str]=0.0_01 , lowercase_ : List[str]=0 , **lowercase_ : List[Any] ):
lowercase_ : str = params
lowercase_ : List[Any] = lr
lowercase_ : int = weight_decay
lowercase_ : Union[str, Any] = kwargs
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : List[str]=None , lowercase_ : int=0 , **lowercase_ : int ):
lowercase_ : Union[str, Any] = optimizer
lowercase_ : List[str] = total_num_steps
lowercase_ : Dict = warmup_num_steps
lowercase_ : Dict = kwargs
| 239 | 0 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
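# Scrape an Amazon India search results page for the given keyword and collect title, link,
# price, rating, MRP and discount for each result into a pandas DataFrame.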
def __lowerCamelCase ( UpperCAmelCase_ : str = "laptop" ):
"""simple docstring"""
a :Optional[int] = F'''https://www.amazon.in/laptop/s?k={product}'''
a :Any = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
a :Optional[int] = BeautifulSoup(requests.get(UpperCAmelCase_ , headers=UpperCAmelCase_ ).text )
# Initialize a Pandas dataframe with the column titles
a :int = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
a :Tuple = item.ha.text
a :Union[str, Any] = '''https://www.amazon.in/''' + item.ha.a['''href''']
a :List[str] = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
a :Tuple = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
a :Optional[int] = '''Not available'''
try:
a :List[str] = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
a :Union[str, Any] = ''''''
try:
a :Tuple = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 100 )
except ValueError:
a :Optional[Any] = float('''nan''' )
except AttributeError:
pass
a :Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
a :Union[str, Any] = ''' '''
a :Any = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
snake_case : List[str] = '''headphones'''
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 281 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
snake_case : List[Any] = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class _snake_case ( TokenizerTesterMixin , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = BartphoTokenizer
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
a :Dict = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
a :Optional[Any] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
a :Tuple = {'''unk_token''': '''<unk>'''}
a :Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(F'''{token} {vocab_tokens[token]}\n''' )
a :Any = BartphoTokenizer(_lowerCamelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :int = '''This is a là test'''
a :str = '''This is a<unk><unk> test'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[str] = BartphoTokenizer(_lowerCamelCase , self.monolingual_vocab_file , **self.special_tokens_map )
a :Optional[Any] = '''This is a là test'''
a :Tuple = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
a :int = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
a :Union[str, Any] = tokens + [tokenizer.unk_token]
a :str = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
| 281 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
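# Tests for the Stable Diffusion LDM3D pipeline, which returns an RGB image together with a
# depth map: the fast tests below use tiny randomly initialised components on CPU, while the
# slow/nightly tests load the Intel/ldm3d checkpoints on GPU.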
class lowercase__ ( unittest.TestCase ):
lowercase__ = StableDiffusionLDMaDPipeline
lowercase__ = TEXT_TO_IMAGE_PARAMS
lowercase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
_UpperCamelCase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,)
_UpperCamelCase : List[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='scaled_linear' ,clip_sample=lowerCAmelCase_ ,set_alpha_to_one=lowerCAmelCase_ ,)
torch.manual_seed(0 )
_UpperCamelCase : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=6 ,out_channels=6 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
torch.manual_seed(0 )
_UpperCamelCase : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
_UpperCamelCase : Any = CLIPTextModel(lowerCAmelCase_ )
_UpperCamelCase : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_UpperCamelCase : str = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[int]=0 ):
'''simple docstring'''
if str(lowerCAmelCase_ ).startswith('mps' ):
_UpperCamelCase : Optional[int] = torch.manual_seed(lowerCAmelCase_ )
else:
_UpperCamelCase : List[str] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_UpperCamelCase : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Any = self.get_dummy_components()
_UpperCamelCase : Dict = StableDiffusionLDMaDPipeline(**lowerCAmelCase_ )
_UpperCamelCase : Union[str, Any] = ldmad_pipe.to(lowerCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_UpperCamelCase : List[str] = self.get_dummy_inputs(lowerCAmelCase_ )
_UpperCamelCase : Union[str, Any] = ldmad_pipe(**lowerCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : Dict = output.rgb, output.depth
_UpperCamelCase : int = rgb[0, -3:, -3:, -1]
_UpperCamelCase : Optional[int] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_UpperCamelCase : int = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
_UpperCamelCase : Union[str, Any] = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : int = self.get_dummy_components()
_UpperCamelCase : List[str] = StableDiffusionLDMaDPipeline(**lowerCAmelCase_ )
_UpperCamelCase : Tuple = ldmad_pipe.to(lowerCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_UpperCamelCase : Dict = self.get_dummy_inputs(lowerCAmelCase_ )
_UpperCamelCase : int = 3 * [inputs['prompt']]
# forward
_UpperCamelCase : Tuple = ldmad_pipe(**lowerCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : Optional[Any] = output.rgb, output.depth
_UpperCamelCase : Any = rgb_slice_a[0, -3:, -3:, -1]
_UpperCamelCase : Optional[Any] = depth_slice_a[0, -3:, -1]
_UpperCamelCase : int = self.get_dummy_inputs(lowerCAmelCase_ )
_UpperCamelCase : Union[str, Any] = 3 * [inputs.pop('prompt' )]
_UpperCamelCase : Tuple = ldmad_pipe.tokenizer(
lowerCAmelCase_ ,padding='max_length' ,max_length=ldmad_pipe.tokenizer.model_max_length ,truncation=lowerCAmelCase_ ,return_tensors='pt' ,)
_UpperCamelCase : Union[str, Any] = text_inputs['input_ids'].to(lowerCAmelCase_ )
_UpperCamelCase : Dict = ldmad_pipe.text_encoder(lowerCAmelCase_ )[0]
_UpperCamelCase : Any = prompt_embeds
# forward
_UpperCamelCase : List[str] = ldmad_pipe(**lowerCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : Dict = output.rgb, output.depth
_UpperCamelCase : int = rgb_slice_a[0, -3:, -3:, -1]
_UpperCamelCase : List[str] = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase : Tuple = self.get_dummy_components()
_UpperCamelCase : List[str] = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
_UpperCamelCase : str = StableDiffusionLDMaDPipeline(**lowerCAmelCase_ )
_UpperCamelCase : List[str] = ldmad_pipe.to(lowerCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_UpperCamelCase : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
_UpperCamelCase : Optional[int] = 'french fries'
_UpperCamelCase : str = ldmad_pipe(**lowerCAmelCase_ ,negative_prompt=lowerCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : List[str] = output.rgb, output.depth
_UpperCamelCase : Optional[Any] = rgb[0, -3:, -3:, -1]
_UpperCamelCase : List[str] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_UpperCamelCase : List[str] = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
_UpperCamelCase : Tuple = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : int ,lowerCamelCase__ : Dict="cpu" ,lowerCamelCase__ : Optional[int]=torch.floataa ,lowerCamelCase__ : Union[str, Any]=0 ):
'''simple docstring'''
_UpperCamelCase : Dict = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_UpperCamelCase : List[str] = np.random.RandomState(lowerCAmelCase_ ).standard_normal((1, 4, 64, 64) )
_UpperCamelCase : List[str] = torch.from_numpy(lowerCAmelCase_ ).to(device=lowerCAmelCase_ ,dtype=lowerCAmelCase_ )
_UpperCamelCase : str = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
_UpperCamelCase : List[Any] = ldmad_pipe.to(lowerCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_UpperCamelCase : str = self.get_inputs(lowerCAmelCase_ )
_UpperCamelCase : List[str] = ldmad_pipe(**lowerCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : Dict = output.rgb, output.depth
_UpperCamelCase : List[str] = rgb[0, -3:, -3:, -1].flatten()
_UpperCamelCase : Union[str, Any] = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
_UpperCamelCase : Tuple = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
_UpperCamelCase : int = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : int ,lowerCamelCase__ : Optional[Any]="cpu" ,lowerCamelCase__ : List[Any]=torch.floataa ,lowerCamelCase__ : Dict=0 ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_UpperCamelCase : Dict = np.random.RandomState(lowerCAmelCase_ ).standard_normal((1, 4, 64, 64) )
_UpperCamelCase : Dict = torch.from_numpy(lowerCAmelCase_ ).to(device=lowerCAmelCase_ ,dtype=lowerCAmelCase_ )
_UpperCamelCase : List[str] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : str = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(lowerCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_UpperCamelCase : Dict = self.get_inputs(lowerCAmelCase_ )
_UpperCamelCase : Union[str, Any] = ldmad_pipe(**lowerCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = output.rgb, output.depth
_UpperCamelCase : Dict = 0.4_9_5_5_8_6
_UpperCamelCase : Tuple = 0.3_3_7_9_5_5_1_5
_UpperCamelCase : Tuple = 1_1_2.4_8_5_1_8
_UpperCamelCase : Union[str, Any] = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(lowerCAmelCase_ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_UpperCamelCase : Union[str, Any] = self.get_inputs(lowerCAmelCase_ )
_UpperCamelCase : Optional[Any] = ldmad_pipe(**lowerCAmelCase_ )
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = output.rgb, output.depth
_UpperCamelCase : Optional[Any] = 0.4_1_9_4_1_2_7
_UpperCamelCase : Union[str, Any] = 0.3_5_3_7_5_5_8_6
_UpperCamelCase : Any = 0.5_6_3_8_5_0_2
_UpperCamelCase : Tuple = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 83 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = 42
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=3 , lowerCAmelCase_=("DownEncoderBlock2D",) , lowerCAmelCase_=(64,) , lowerCAmelCase_=2 , lowerCAmelCase_=32 , lowerCAmelCase_="silu" , lowerCAmelCase_=True , ):
"""simple docstring"""
super().__init__()
_snake_case = layers_per_block
_snake_case = torch.nn.Convad(
lowerCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_snake_case = None
_snake_case = nn.ModuleList([] )
# down
_snake_case = block_out_channels[0]
for i, down_block_type in enumerate(lowerCAmelCase_ ):
_snake_case = output_channel
_snake_case = block_out_channels[i]
_snake_case = i == len(lowerCAmelCase_ ) - 1
_snake_case = get_down_block(
lowerCAmelCase_ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
self.down_blocks.append(lowerCAmelCase_ )
# mid
_snake_case = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
# out
_snake_case = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase_ , eps=1E-6 )
_snake_case = nn.SiLU()
_snake_case = 2 * out_channels if double_z else out_channels
_snake_case = nn.Convad(block_out_channels[-1] , lowerCAmelCase_ , 3 , padding=1 )
_snake_case = False
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = x
_snake_case = self.conv_in(lowerCAmelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase_ ):
def custom_forward(*lowerCAmelCase_ ):
return module(*lowerCAmelCase_ )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
# middle
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
else:
for down_block in self.down_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ )
# middle
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase_ )
else:
# down
for down_block in self.down_blocks:
_snake_case = down_block(lowerCAmelCase_ )
# middle
_snake_case = self.mid_block(lowerCAmelCase_ )
# post-process
_snake_case = self.conv_norm_out(lowerCAmelCase_ )
_snake_case = self.conv_act(lowerCAmelCase_ )
_snake_case = self.conv_out(lowerCAmelCase_ )
return sample
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_=3 , lowerCAmelCase_=3 , lowerCAmelCase_=("UpDecoderBlock2D",) , lowerCAmelCase_=(64,) , lowerCAmelCase_=2 , lowerCAmelCase_=32 , lowerCAmelCase_="silu" , lowerCAmelCase_="group" , ):
"""simple docstring"""
super().__init__()
_snake_case = layers_per_block
_snake_case = nn.Convad(
lowerCAmelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_snake_case = None
_snake_case = nn.ModuleList([] )
_snake_case = in_channels if norm_type == 'spatial' else None
# mid
_snake_case = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , )
# up
_snake_case = list(reversed(lowerCAmelCase_ ) )
_snake_case = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase_ ):
_snake_case = output_channel
_snake_case = reversed_block_out_channels[i]
_snake_case = i == len(lowerCAmelCase_ ) - 1
_snake_case = get_up_block(
lowerCAmelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , prev_output_channel=lowerCAmelCase_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , resnet_time_scale_shift=lowerCAmelCase_ , )
self.up_blocks.append(lowerCAmelCase_ )
_snake_case = output_channel
# out
if norm_type == "spatial":
_snake_case = SpatialNorm(block_out_channels[0] , lowerCAmelCase_ )
else:
_snake_case = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCAmelCase_ , eps=1E-6 )
_snake_case = nn.SiLU()
_snake_case = nn.Convad(block_out_channels[0] , lowerCAmelCase_ , 3 , padding=1 )
_snake_case = False
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ):
"""simple docstring"""
_snake_case = z
_snake_case = self.conv_in(lowerCAmelCase_ )
_snake_case = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase_ ):
def custom_forward(*lowerCAmelCase_ ):
return module(*lowerCAmelCase_ )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
_snake_case = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ )
else:
# middle
_snake_case = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_snake_case = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ )
else:
# middle
_snake_case = self.mid_block(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = sample.to(lowerCAmelCase_ )
# up
for up_block in self.up_blocks:
_snake_case = up_block(lowerCAmelCase_ , lowerCAmelCase_ )
# post-process
if latent_embeds is None:
_snake_case = self.conv_norm_out(lowerCAmelCase_ )
else:
_snake_case = self.conv_norm_out(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.conv_act(lowerCAmelCase_ )
_snake_case = self.conv_out(lowerCAmelCase_ )
return sample
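# Vector-quantization layer: maps each latent vector to its nearest codebook embedding,
# adds a commitment loss weighted by beta and uses a straight-through gradient estimator;
# optionally remaps codebook indices to a restricted set of "used" entries.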
class __UpperCAmelCase ( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_="random" , lowerCAmelCase_=False , lowerCAmelCase_=True ):
"""simple docstring"""
super().__init__()
_snake_case = n_e
_snake_case = vq_embed_dim
_snake_case = beta
_snake_case = legacy
_snake_case = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_snake_case = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
_snake_case = self.used.shape[0]
_snake_case = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_snake_case = self.re_embed
_snake_case = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
_snake_case = n_e
_snake_case = sane_index_shape
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = inds.shape
assert len(lowerCAmelCase_ ) > 1
_snake_case = inds.reshape(ishape[0] , -1 )
_snake_case = self.used.to(lowerCAmelCase_ )
_snake_case = (inds[:, :, None] == used[None, None, ...]).long()
_snake_case = match.argmax(-1 )
_snake_case = match.sum(2 ) < 1
if self.unknown_index == "random":
_snake_case = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_snake_case = self.unknown_index
return new.reshape(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = inds.shape
assert len(lowerCAmelCase_ ) > 1
_snake_case = inds.reshape(ishape[0] , -1 )
_snake_case = self.used.to(lowerCAmelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
_snake_case = 0 # simply set to zero
_snake_case = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCAmelCase_ )
return back.reshape(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = z.permute(0 , 2 , 3 , 1 ).contiguous()
_snake_case = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_snake_case = torch.argmin(torch.cdist(lowerCAmelCase_ , self.embedding.weight ) , dim=1 )
_snake_case = self.embedding(lowerCAmelCase_ ).view(z.shape )
_snake_case = None
_snake_case = None
# compute loss for embedding
if not self.legacy:
_snake_case = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_snake_case = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_snake_case = z + (z_q - z).detach()
# reshape back to match original input shape
_snake_case = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_snake_case = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_snake_case = self.remap_to_used(lowerCAmelCase_ )
_snake_case = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_snake_case = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
if self.remap is not None:
_snake_case = indices.reshape(shape[0] , -1 ) # add batch axis
_snake_case = self.unmap_to_all(lowerCAmelCase_ )
_snake_case = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_snake_case = self.embedding(lowerCAmelCase_ )
if shape is not None:
_snake_case = z_q.view(lowerCAmelCase_ )
# reshape back to match original input shape
_snake_case = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
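# Diagonal Gaussian over latents, parameterized by concatenated mean and log-variance;
# supports sampling, KL divergence, negative log-likelihood and a deterministic mode.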
class __UpperCAmelCase ( _lowerCamelCase ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=False ):
"""simple docstring"""
_snake_case = parameters
_snake_case , _snake_case = torch.chunk(lowerCAmelCase_ , 2 , dim=1 )
_snake_case = torch.clamp(self.logvar , -30.0 , 20.0 )
_snake_case = deterministic
_snake_case = torch.exp(0.5 * self.logvar )
_snake_case = torch.exp(self.logvar )
if self.deterministic:
_snake_case = _snake_case = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase ( self , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = randn_tensor(
self.mean.shape , generator=lowerCAmelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
_snake_case = self.mean + self.std * sample
return x
def lowerCamelCase ( self , lowerCAmelCase_=None ):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=[1, 2, 3] ):
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
_snake_case = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
return self.mean
| 42 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = 'pytorch_model.bin'
WEIGHTS_INDEX_NAME = 'pytorch_model.bin.index.json'
ADAPTER_CONFIG_NAME = 'adapter_config.json'
ADAPTER_WEIGHTS_NAME = 'adapter_model.bin'
ADAPTER_SAFE_WEIGHTS_NAME = 'adapter_model.safetensors'
TF2_WEIGHTS_NAME = 'tf_model.h5'
TF2_WEIGHTS_INDEX_NAME = 'tf_model.h5.index.json'
TF_WEIGHTS_NAME = 'model.ckpt'
FLAX_WEIGHTS_NAME = 'flax_model.msgpack'
FLAX_WEIGHTS_INDEX_NAME = 'flax_model.msgpack.index.json'
SAFE_WEIGHTS_NAME = 'model.safetensors'
SAFE_WEIGHTS_INDEX_NAME = 'model.safetensors.index.json'
CONFIG_NAME = 'config.json'
FEATURE_EXTRACTOR_NAME = 'preprocessor_config.json'
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = 'generation_config.json'
MODEL_CARD_NAME = 'modelcard.json'
SENTENCEPIECE_UNDERLINE = '▁'
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version (min_version : str ):
    '''simple docstring'''
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                '''This example requires a source install from HuggingFace Transformers (see '''
                '''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
            )
        else:
            error_message = f'This example requires a minimum version of {min_version},'
        error_message += f' but the version found is {__version__}.\n'
        raise ImportError(
            error_message
            + '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
            '''versions of HuggingFace Transformers.''' )
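# Added usage sketch (not part of the original file): an example script would call the
# guard at import time, e.g.
#
#     check_min_version("4.31.0.dev0")
#
# which raises an ImportError with an explanatory message when the installed library
# version is older than the one the example was written for.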
| 179 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job (job ):
    '''simple docstring'''
    job_info = {}
    start = job['''started_at''']
    end = job['''completed_at''']
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info['''started_at'''] = start
    job_info['''completed_at'''] = end
    job_info['''duration'''] = duration_in_min
    return job_info
def get_job_time (workflow_run_id , token=None ):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )
        remaining_pages = math.ceil((result['''total_count'''] - 100) / 100 )
        for i in range(remaining_pages ):
            result = requests.get(url + f'&page={i + 2}' , headers=headers ).json()
            job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )
        return job_time
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
        return {}
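# Added illustration (not part of the original script): the mapping returned by
# get_job_time looks roughly like
#   {"run_tests_torch": {"started_at": "...", "completed_at": "...", "duration": 17}, ...}
# with durations expressed in whole minutes.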
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
print(F'''{k}: {v['duration']}''')
| 179 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( _lowerCamelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = XLMTokenizer
SCREAMING_SNAKE_CASE_ = False
def a_ ( self) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
snake_case_ = dict(zip(a_, range(len(a_))))
snake_case_ = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file, 'w') as fp:
fp.write(json.dumps(a_))
with open(self.merges_file, 'w') as fp:
fp.write('\n'.join(a_))
def a_ ( self, lowerCAmelCase__) -> str:
snake_case_ = """lower newer"""
snake_case_ = """lower newer"""
return input_text, output_text
def a_ ( self) -> int:
snake_case_ = XLMTokenizer(self.vocab_file, self.merges_file)
snake_case_ = """lower"""
snake_case_ = ["""low""", """er</w>"""]
snake_case_ = tokenizer.tokenize(a_)
self.assertListEqual(a_, a_)
snake_case_ = tokens + ["""<unk>"""]
snake_case_ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_), a_)
@slow
def a_ ( self) -> Union[str, Any]:
snake_case_ = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
snake_case_ = tokenizer.encode('sequence builders', add_special_tokens=a_)
snake_case_ = tokenizer.encode('multi-sequence build', add_special_tokens=a_)
snake_case_ = tokenizer.build_inputs_with_special_tokens(a_)
snake_case_ = tokenizer.build_inputs_with_special_tokens(a_, a_)
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
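# ---------------------------------------------------------------------------
# Added illustration (not part of the original test file): a minimal, dependency-free
# sketch of the BPE idea the toy vocab/merges above encode. It greedily applies the
# merges "l o", "lo w" and "e r</w>" to the word "lower", which is why the test
# expects the tokens ["low", "er</w>"].
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    def _toy_bpe(word):
        # Start from characters, with the end-of-word marker on the last symbol.
        symbols = list(word[:-1]) + [word[-1] + "</w>"]
        merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]
        for left, right in merges:
            merged = []
            i = 0
            while i < len(symbols):
                if i + 1 < len(symbols) and symbols[i] == left and symbols[i + 1] == right:
                    merged.append(left + right)
                    i += 2
                else:
                    merged.append(symbols[i])
                    i += 1
            symbols = merged
        return symbols

    assert _toy_bpe("lower") == ["low", "er</w>"]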
| 69 |
'''simple docstring'''
from __future__ import annotations
import queue
class lowercase :
"""simple docstring"""
def __init__( self ,a_ ) -> str:
_UpperCAmelCase : Optional[Any] = data
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Union[str, Any] = None
def snake_case_ ( )-> TreeNode:
'''simple docstring'''
print("""\n********Press N to stop entering at any point of time********\n""" )
_UpperCAmelCase : Any = input("""Enter the value of the root node: """ ).strip().lower()
_UpperCAmelCase : queue.Queue = queue.Queue()
_UpperCAmelCase : List[str] = TreeNode(int(lowerCAmelCase_ ) )
q.put(lowerCAmelCase_ )
while not q.empty():
_UpperCAmelCase : str = q.get()
_UpperCAmelCase : Any = F'''Enter the left node of {node_found.data}: '''
_UpperCAmelCase : Union[str, Any] = input(lowerCAmelCase_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_UpperCAmelCase : List[str] = TreeNode(int(lowerCAmelCase_ ) )
_UpperCAmelCase : Optional[int] = left_node
q.put(lowerCAmelCase_ )
_UpperCAmelCase : Dict = F'''Enter the right node of {node_found.data}: '''
_UpperCAmelCase : Tuple = input(lowerCAmelCase_ ).strip().lower() or """n"""
if check == "n":
return tree_node
_UpperCAmelCase : Any = TreeNode(int(lowerCAmelCase_ ) )
_UpperCAmelCase : Optional[Any] = right_node
q.put(lowerCAmelCase_ )
raise
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase : queue.Queue = queue.Queue()
q.put(lowerCAmelCase_ )
while not q.empty():
_UpperCAmelCase : Dict = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase : queue.Queue = queue.Queue()
q.put(lowerCAmelCase_ )
while not q.empty():
_UpperCAmelCase : Optional[int] = []
while not q.empty():
_UpperCAmelCase : Optional[int] = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase : list[TreeNode] = []
_UpperCAmelCase : Optional[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = n.left
# end of while means current node doesn't have left child
_UpperCAmelCase : int = stack.pop()
# start to traverse its right child
_UpperCAmelCase : Any = n.right
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase : list[TreeNode] = []
_UpperCAmelCase : Optional[Any] = node
while n or stack:
while n:
stack.append(lowerCAmelCase_ )
_UpperCAmelCase : Tuple = n.left
_UpperCAmelCase : Union[str, Any] = stack.pop()
print(n.data , end=""",""" )
_UpperCAmelCase : Any = n.right
def snake_case_ ( lowerCAmelCase_ )-> None:
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not node:
return
_UpperCAmelCase ,_UpperCAmelCase : str = [], []
_UpperCAmelCase : Dict = node
stacka.append(lowerCAmelCase_ )
while stacka: # to find the reversed order of post order, store it in stack2
_UpperCAmelCase : Optional[int] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(lowerCAmelCase_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=""",""" )
def snake_case_ ( lowerCAmelCase_ = "" , lowerCAmelCase_=50 , lowerCAmelCase_="*" )-> str:
'''simple docstring'''
if not s:
return "\n" + width * char
_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = divmod(width - len(lowerCAmelCase_ ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
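# ---------------------------------------------------------------------------
# Added worked example (not part of the original module): for the complete tree
#
#             1
#           /   \
#          2     3
#         / \   / \
#        4   5 6   7
#
# the traversal orders are
#   pre-order:   1, 2, 4, 5, 3, 6, 7
#   in-order:    4, 2, 5, 1, 6, 3, 7
#   post-order:  4, 5, 2, 6, 7, 3, 1
#   level-order: 1, 2, 3, 4, 5, 6, 7
# The self-contained check below uses plain (value, left, right) tuples so it does
# not depend on the interactive build_tree() defined above.
# ---------------------------------------------------------------------------
def _tuple_pre_order(t):
    return [] if t is None else [t[0]] + _tuple_pre_order(t[1]) + _tuple_pre_order(t[2])

_SMALL_TREE = (1, (2, (4, None, None), (5, None, None)), (3, (6, None, None), (7, None, None)))
assert _tuple_pre_order(_SMALL_TREE) == [1, 2, 4, 5, 3, 6, 7]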
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
A_ : TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 5_0 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 215 | 0 |
import numpy as np
def lowerCAmelCase_ ( __UpperCAmelCase: np.array ) -> np.array:
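    # Added docstring with a doctest (not in the original); it is exercised by the
    # doctest.testmod() call in the __main__ block below.
    """
    Element-wise logistic sigmoid, 1 / (1 + exp(-x)).

    >>> lowerCAmelCase_(np.array([0.0]))
    array([0.5])
    """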
    return 1 / (1 + np.exp(-__UpperCAmelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 247 |
from __future__ import annotations
import bisect
def bisect_left (sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right (sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left (sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right (sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search (sorted_collection: list[int] , item: int ) -> int | None:
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib (sorted_collection: list[int] , item: int ) -> int | None:
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion (sorted_collection: list[int] , item: int , left: int , right: int ) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
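# ---------------------------------------------------------------------------
# Added illustration (not part of the original file): with duplicate values,
# bisect_left returns the first valid insertion index and bisect_right the last,
# while binary_search returns the index of a matching element.
# ---------------------------------------------------------------------------
_demo_collection = [1, 2, 2, 2, 3]
assert bisect_left(_demo_collection, 2) == 1
assert bisect_right(_demo_collection, 2) == 4
assert binary_search(_demo_collection, 3) == 4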
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by comma:\n').strip()
UpperCAmelCase_ = sorted(int(item) for item in user_input.split(','))
UpperCAmelCase_ = int(input('Enter a single number to be found in the list:\n'))
UpperCAmelCase_ = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
| 247 | 1 |
import argparse
from collections import defaultdict
import yaml
__UpperCamelCase : List[Any] = "docs/source/en/_toctree.yml"
def __A ( __lowerCamelCase ) -> Tuple:
a = defaultdict(_SCREAMING_SNAKE_CASE )
for doc in model_doc:
counts[doc["local"]] += 1
a = [key for key, value in counts.items() if value > 1]
a = []
for duplicate_key in duplicates:
a = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key} )
if len(_SCREAMING_SNAKE_CASE ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1] )
# Sort
return sorted(_SCREAMING_SNAKE_CASE , key=lambda __lowerCamelCase : s["title"].lower() )
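# Added illustration (not part of the original script): the cleanup collapses duplicate
# `local` entries that share a single title and sorts the section alphabetically, e.g.
#   [{"local": "bert", "title": "BERT"}, {"local": "albert", "title": "ALBERT"},
#    {"local": "bert", "title": "BERT"}]
# becomes
#   [{"local": "albert", "title": "ALBERT"}, {"local": "bert", "title": "BERT"}]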
def __A ( __lowerCamelCase=False ) -> Dict:
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as f:
a = yaml.safe_load(f.read() )
# Get to the API doc
a = 0
while content[api_idx]["title"] != "API":
api_idx += 1
a = content[api_idx]['sections']
# Then to the model doc
a = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
a = api_doc[model_idx]['sections']
a = [(idx, section) for idx, section in enumerate(_SCREAMING_SNAKE_CASE ) if 'sections' in section]
a = False
for idx, modality_doc in modalities_docs:
a = modality_doc['sections']
a = clean_model_doc_toc(_SCREAMING_SNAKE_CASE )
if old_modality_doc != new_modality_doc:
a = True
if overwrite:
a = new_modality_doc
if diff:
if overwrite:
a = model_doc
a = api_doc
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(_SCREAMING_SNAKE_CASE , allow_unicode=_SCREAMING_SNAKE_CASE ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__UpperCamelCase : Optional[int] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 228 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_reformer"""] = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_reformer_fast"""] = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_reformer"""] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
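# Added note (not part of the original file): with this lazy-module pattern, symbols such
# as `ReformerModel` are only imported when first accessed, e.g.
#
#     from transformers.models.reformer import ReformerModel
#
# triggers the actual import of `modeling_reformer` (and its torch dependency) on demand.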
| 293 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : Any = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig( PretrainedConfig):
    """simple docstring"""
    model_type = 'fnet'
def __init__( self : int , lowerCamelCase : List[str]=3_20_00 , lowerCamelCase : str=7_68 , lowerCamelCase : Tuple=12 , lowerCamelCase : List[Any]=30_72 , lowerCamelCase : Optional[Any]="gelu_new" , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : Any=5_12 , lowerCamelCase : List[str]=4 , lowerCamelCase : Any=0.02 , lowerCamelCase : Union[str, Any]=1E-12 , lowerCamelCase : Optional[int]=False , lowerCamelCase : Optional[int]=5_12 , lowerCamelCase : Any=3 , lowerCamelCase : Tuple=1 , lowerCamelCase : Optional[int]=2 , **lowerCamelCase : Dict , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
lowerCAmelCase_ : Dict = vocab_size
lowerCAmelCase_ : Union[str, Any] = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = hidden_size
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : List[str] = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Tuple = hidden_dropout_prob
lowerCAmelCase_ : List[Any] = initializer_range
lowerCAmelCase_ : str = type_vocab_size
lowerCAmelCase_ : Tuple = layer_norm_eps
lowerCAmelCase_ : Union[str, Any] = use_tpu_fourier_optimizations
lowerCAmelCase_ : Dict = tpu_short_seq_length
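# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original file): instantiating the configuration
# with defaults and overriding a couple of hyper-parameters.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _config = FNetConfig(hidden_size=512, num_hidden_layers=6)
    print(_config.hidden_size, _config.max_position_embeddings)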
| 354 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
__A : str = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = _ask_options(
"""In which compute environment are you running?""" , ["""This machine""", """AWS (Amazon SageMaker)"""] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowerCAmelCase_ : str = get_sagemaker_input()
else:
lowerCAmelCase_ : Optional[int] = get_cluster_input()
return config
def UpperCamelCase_ ( A__ : Optional[Any]=None ):
'''simple docstring'''
if subparsers is not None:
lowerCAmelCase_ : List[str] = subparsers.add_parser("""config""" , description=A__ )
else:
lowerCAmelCase_ : Optional[int] = argparse.ArgumentParser("""Accelerate config command""" , description=A__ )
parser.add_argument(
"""--config_file""" , default=A__ , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=A__ )
return parser
def UpperCamelCase_ ( A__ : Any ):
'''simple docstring'''
lowerCAmelCase_ : Dict = get_user_input()
if args.config_file is not None:
lowerCAmelCase_ : List[str] = args.config_file
else:
if not os.path.isdir(A__ ):
os.makedirs(A__ )
lowerCAmelCase_ : List[Any] = default_yaml_config_file
if config_file.endswith(""".json""" ):
config.to_json_file(A__ )
else:
config.to_yaml_file(A__ )
print(f'accelerate configuration saved at {config_file}' )
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : str = config_command_parser()
lowerCAmelCase_ : Tuple = parser.parse_args()
config_command(A__ )
if __name__ == "__main__":
main()
| 89 | 0 |
import random
from typing import Any
def fisher_yates_shuffle ( data : list ):
    """simple docstring"""
    # Swap each position with a uniformly chosen position (in-place shuffle).
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a] , data[b] = data[b] , data[a]
    return data
if __name__ == "__main__":
snake_case : List[str] = [0, 1, 2, 3, 4, 5, 6, 7]
snake_case : List[Any] = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 94 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_UpperCamelCase = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
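# Added worked example (not part of the original metric card): with precision = 0.5 and
# recall = 1.0, F1 = 2 * (0.5 * 1.0) / (0.5 + 1.0) = 1.0 / 1.5 ≈ 0.667.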
_UpperCamelCase = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_UpperCamelCase = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ):
        '''simple docstring'''
        score = f1_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
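# ---------------------------------------------------------------------------
# Added sanity-check sketch (not part of the original file): the metric simply wraps
# sklearn, so calling sklearn directly on the toy labels from the docstring gives the
# same value.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    print(f1_score([0, 1, 0, 1, 0], [0, 0, 1, 1, 0]))  # 0.5, matching the first example above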
| 254 | 0 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def __lowerCamelCase ( lowerCAmelCase__ ):
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
lowerCAmelCase__ = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
lowerCAmelCase__ = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
lowerCAmelCase__ = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
lowerCAmelCase__ = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
lowerCAmelCase__ = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
lowerCAmelCase__ = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
lowerCAmelCase__ = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
lowerCAmelCase__ = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
lowerCAmelCase__ = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
lowerCAmelCase__ = key.replace('image_encoder.module' , 'flava.image_model' )
lowerCAmelCase__ = key.replace('text_encoder.module' , 'flava.text_model' )
lowerCAmelCase__ = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
lowerCAmelCase__ = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
lowerCAmelCase__ = key.replace('text_projection' , 'flava.text_projection' )
lowerCAmelCase__ = key.replace('image_projection' , 'flava.image_projection' )
lowerCAmelCase__ = value.float()
for key, value in codebook_state_dict.items():
lowerCAmelCase__ = value
return upgrade
@torch.no_grad()
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ):
if config_path is not None:
lowerCAmelCase__ = FlavaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ = FlavaConfig()
lowerCAmelCase__ = FlavaForPreTraining(SCREAMING_SNAKE_CASE_ ).eval()
lowerCAmelCase__ = convert_dalle_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , save_checkpoint=SCREAMING_SNAKE_CASE_ )
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
else:
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
lowerCAmelCase__ = upgrade_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = hf_model.state_dict()
lowerCAmelCase__ = count_parameters(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = count_parameters(SCREAMING_SNAKE_CASE_ ) + count_parameters(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
lowerCAmelCase__ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 366 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
lowerCAmelCase__ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def __lowerCamelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = {}
with open(lowerCAmelCase__ , 'r' ) as file:
for line_number, line in enumerate(lowerCAmelCase__ ):
lowerCAmelCase__ = line.strip()
if line:
lowerCAmelCase__ = line.split()
lowerCAmelCase__ = line_number
lowerCAmelCase__ = words[0]
lowerCAmelCase__ = value
return result
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for attribute in key.split('.' ):
lowerCAmelCase__ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase__ ):
lowerCAmelCase__ = PARAM_MAPPING[full_name.split('.' )[-1]]
lowerCAmelCase__ = 'param'
if weight_type is not None and weight_type != "param":
lowerCAmelCase__ = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
elif weight_type is not None and weight_type == "param":
lowerCAmelCase__ = hf_pointer
for attribute in hf_param_name.split('.' ):
lowerCAmelCase__ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ = shape_pointer.shape
# let's reduce dimension
lowerCAmelCase__ = value[0]
else:
lowerCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowerCAmelCase__ = value
elif weight_type == "weight_g":
lowerCAmelCase__ = value
elif weight_type == "weight_v":
lowerCAmelCase__ = value
elif weight_type == "bias":
lowerCAmelCase__ = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
lowerCAmelCase__ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ = value
else:
lowerCAmelCase__ = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCAmelCase__ ):
lowerCAmelCase__ = PARAM_MAPPING[full_name.split('.' )[-1]]
lowerCAmelCase__ = 'param'
if weight_type is not None and weight_type != "param":
lowerCAmelCase__ = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
lowerCAmelCase__ = '.'.join([key, hf_param_name] )
else:
lowerCAmelCase__ = key
lowerCAmelCase__ = value if 'lm_head' in full_key else value[0]
lowerCAmelCase__ = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ):
lowerCAmelCase__ = False
for key, mapped_key in MAPPING.items():
lowerCAmelCase__ = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCAmelCase__ = True
if "*" in mapped_key:
lowerCAmelCase__ = name.split(lowerCAmelCase__ )[0].split('.' )[-2]
lowerCAmelCase__ = mapped_key.replace('*' , lowerCAmelCase__ )
if "weight_g" in name:
lowerCAmelCase__ = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase__ = 'weight_v'
elif "bias" in name:
lowerCAmelCase__ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase__ = 'weight'
else:
lowerCAmelCase__ = None
if hf_dict is not None:
rename_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return is_used
return is_used
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = []
lowerCAmelCase__ = fairseq_model.state_dict()
lowerCAmelCase__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == 'group' , )
lowerCAmelCase__ = True
else:
lowerCAmelCase__ = load_wavaveca_layer(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = full_name.split('conv_layers.' )[-1]
lowerCAmelCase__ = name.split('.' )
lowerCAmelCase__ = int(items[0] )
lowerCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowerCAmelCase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def __lowerCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=False ):
if config_path is not None:
lowerCAmelCase__ = WavaVecaConfig.from_pretrained(lowerCAmelCase__ )
else:
lowerCAmelCase__ = WavaVecaConfig()
if is_seq_class:
lowerCAmelCase__ = read_txt_into_dict(lowerCAmelCase__ )
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = WavaVecaForSequenceClassification(lowerCAmelCase__ )
lowerCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
feature_extractor.save_pretrained(lowerCAmelCase__ )
elif is_finetuned:
if dict_path:
lowerCAmelCase__ = Dictionary.load(lowerCAmelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCAmelCase__ = target_dict.pad_index
lowerCAmelCase__ = target_dict.bos_index
lowerCAmelCase__ = target_dict.eos_index
lowerCAmelCase__ = len(target_dict.symbols )
lowerCAmelCase__ = os.path.join(lowerCAmelCase__ , 'vocab.json' )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCAmelCase__ ) )
return
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
lowerCAmelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCAmelCase__ = 0
lowerCAmelCase__ = 1
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ = WavaVecaCTCTokenizer(
lowerCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCAmelCase__ , )
lowerCAmelCase__ = True if config.feat_extract_norm == 'layer' else False
lowerCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
lowerCAmelCase__ = WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
lowerCAmelCase__ = WavaVecaForCTC(lowerCAmelCase__ )
else:
lowerCAmelCase__ = WavaVecaForPreTraining(lowerCAmelCase__ )
if is_finetuned or is_seq_class:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
lowerCAmelCase__ = argparse.Namespace(task='audio_pretraining' )
lowerCAmelCase__ = fairseq.tasks.setup_task(lowerCAmelCase__ )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase__ )
lowerCAmelCase__ = model[0].eval()
recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 119 | 0 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def _snake_case ( ):
UpperCAmelCase : List[Any] = torch.nn.Linear(2 , 4 )
UpperCAmelCase : Optional[int] = torch.optim.AdamW(model.parameters() , lr=1.0 )
UpperCAmelCase : int = torch.optim.lr_scheduler.OneCycleLR(UpperCamelCase , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
UpperCAmelCase : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
UpperCAmelCase : Tuple = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def _snake_case ( UpperCamelCase : int ):
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def _snake_case ( UpperCamelCase : List[Any] ):
UpperCAmelCase : Tuple = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(UpperCamelCase )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
@require_cuda
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[Any] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Optional[int] = Accelerator(cpu=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Any = Accelerator()
UpperCAmelCase : List[str] = GradientState()
assert state.num_steps == 1
UpperCAmelCase : Optional[Any] = 4
assert state.num_steps == 4
assert state.sync_gradients is True
UpperCAmelCase : List[str] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = create_components()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) : Union[str, Any] = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : int = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = create_components()
accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
pass
with patch("""torch.cuda.set_device""" , _SCREAMING_SNAKE_CASE ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
UpperCAmelCase : List[str] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Any = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = create_components()
accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = get_signature(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_SCREAMING_SNAKE_CASE )
# make sure random weights don't match
load_random_weights(_SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(_SCREAMING_SNAKE_CASE ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(_SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(_SCREAMING_SNAKE_CASE ) ) < 1E-3 )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = create_components()
accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = get_signature(_SCREAMING_SNAKE_CASE )
# saving hook
def save_config(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : int = {"""class_name""": models[0].__class__.__name__}
with open(os.path.join(_SCREAMING_SNAKE_CASE , """data.json""" ) , """w""" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# loading hook
def load_config(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with open(os.path.join(_SCREAMING_SNAKE_CASE , """data.json""" ) , """r""" ) as f:
UpperCAmelCase : Union[str, Any] = json.load(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = config["""class_name"""]
UpperCAmelCase : Union[str, Any] = accelerator.register_save_state_pre_hook(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = accelerator.register_load_state_pre_hook(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_SCREAMING_SNAKE_CASE )
# make sure random weights don't match with hooks
load_random_weights(_SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(_SCREAMING_SNAKE_CASE ) ) > 1E-3 )
# random class name to verify correct one is loaded
UpperCAmelCase : Union[str, Any] = """random"""
# make sure loaded weights match with hooks
accelerator.load_state(_SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(_SCREAMING_SNAKE_CASE ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_SCREAMING_SNAKE_CASE )
# make sure random weights don't match with hooks removed
load_random_weights(_SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(_SCREAMING_SNAKE_CASE ) ) > 1E-3 )
# random class name to verify correct one is loaded
UpperCAmelCase : List[Any] = """random"""
# make sure loaded weights match with hooks removed
accelerator.load_state(_SCREAMING_SNAKE_CASE )
self.assertTrue(abs(model_signature - get_signature(_SCREAMING_SNAKE_CASE ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : List[str] = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = create_components()
UpperCAmelCase : List[str] = None
# This should work
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : List[str] = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[Any] = create_components()
UpperCAmelCase : Union[str, Any] = [1, 2, 3]
# This should work
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(
getattr(_SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , )
self.assertEqual(
getattr(_SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(_SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(_SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(_SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(_SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
from transformers import AutoModelForCausalLM
UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=_SCREAMING_SNAKE_CASE , device_map={"""""": 0} , )
UpperCAmelCase : Union[str, Any] = Accelerator()
# This should work
UpperCAmelCase : str = accelerator.prepare(_SCREAMING_SNAKE_CASE )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
from transformers import AutoModelForCausalLM
UpperCAmelCase : Any = Accelerator()
with init_empty_weights():
UpperCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
UpperCAmelCase : Any = infer_auto_device_map(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = """cpu"""
UpperCAmelCase : List[str] = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , device_map=_SCREAMING_SNAKE_CASE , load_in_abit=_SCREAMING_SNAKE_CASE , llm_inta_enable_fpaa_cpu_offload=_SCREAMING_SNAKE_CASE )
# This should not work and get value error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Union[str, Any] = accelerator.prepare(_SCREAMING_SNAKE_CASE )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
from transformers import AutoModelForCausalLM
UpperCAmelCase : Dict = {"""distributed_type""": DistributedType.MULTI_GPU}
with init_empty_weights():
UpperCAmelCase : Tuple = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
UpperCAmelCase : List[str] = infer_auto_device_map(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = 1
UpperCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : Optional[int] = Accelerator()
# This should not work and get value error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Union[str, Any] = accelerator.prepare(_SCREAMING_SNAKE_CASE )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
UpperCAmelCase : List[str] = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
UpperCAmelCase : str = infer_auto_device_map(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = 1
UpperCAmelCase : int = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=_SCREAMING_SNAKE_CASE , device_map=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : int = Accelerator()
# This should work
UpperCAmelCase : List[str] = accelerator.prepare(_SCREAMING_SNAKE_CASE )
@require_cuda
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[Any] = torch.nn.Linear(10 , 10 )
UpperCAmelCase : int = torch.optim.SGD(model.parameters() , lr=0.01 )
UpperCAmelCase : str = Accelerator(cpu=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = accelerator.prepare(_SCREAMING_SNAKE_CASE )
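# Hedged usage sketch (not part of the original tests; the directory name and hook bodies
# are assumptions): the save/load state hooks exercised above are registered like this.
#
#     accelerator = Accelerator()
#     save_hook = accelerator.register_save_state_pre_hook(save_config)
#     load_hook = accelerator.register_load_state_pre_hook(load_config)
#     accelerator.save_state("ckpt_dir")   # save_config runs before the checkpoint is written
#     accelerator.load_state("ckpt_dir")   # load_config runs before the checkpoint is restored
#     save_hook.remove()
#     load_hook.remove()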
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __a ( A__ ):
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : List[str] = tempfile.mkdtemp()
UpperCamelCase__ : Any = 8
# DPR tok
UpperCamelCase__ : List[Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCamelCase__ : Any = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
UpperCamelCase__ : str = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCamelCase__ : int = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase__ : Optional[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCamelCase__ : Union[str, Any] = {"unk_token": "<unk>"}
UpperCamelCase__ : Dict = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = os.path.join(SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ : str = os.path.join(SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
def __lowercase ( self : int ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Tuple = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : int = self.get_dummy_dataset()
UpperCamelCase__ : List[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
UpperCamelCase__ : str = dataset
UpperCamelCase__ : Optional[int] = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : bool ):
'''simple docstring'''
UpperCamelCase__ : Dict = self.get_dummy_dataset()
UpperCamelCase__ : Tuple = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
UpperCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , "dataset" )
UpperCamelCase__ : List[str] = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
UpperCamelCase__ : str = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
UpperCamelCase__ : List[Any] = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE ) , )
return retriever
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : int = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCamelCase__ : List[str] = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
UpperCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
UpperCamelCase__ : Tuple = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE , open(SCREAMING_SNAKE_CASE , "wb" ) )
UpperCamelCase__ : List[str] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
UpperCamelCase__ : List[str] = RagRetriever(
SCREAMING_SNAKE_CASE , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Tuple = self.get_dummy_canonical_hf_index_retriever()
UpperCamelCase__ : str = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
UpperCamelCase__ : Optional[int] = self.get_dummy_dataset()
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
UpperCamelCase__ : Any = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : int = 1
UpperCamelCase__ : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : Any = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
UpperCamelCase__ : Any = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCamelCase__ : int = 1
UpperCamelCase__ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
UpperCamelCase__ : Union[str, Any] = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : List[str] = 1
UpperCamelCase__ : Any = self.get_dummy_legacy_index_retriever()
UpperCamelCase__ : Optional[int] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Tuple = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , SCREAMING_SNAKE_CASE )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
UpperCamelCase__ : int = retriever.retrieve(SCREAMING_SNAKE_CASE , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __lowercase ( self : int ):
'''simple docstring'''
import torch
UpperCamelCase__ : Optional[Any] = 1
UpperCamelCase__ : Optional[int] = self.get_dummy_canonical_hf_index_retriever()
UpperCamelCase__ : Optional[Any] = [[5, 7], [10, 11]]
UpperCamelCase__ : List[str] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
UpperCamelCase__ : int = retriever(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
UpperCamelCase__ : List[Any] = retriever(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE , return_tensors="pt" , )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[Any] = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.get_dpr_ctx_encoder_tokenizer()
UpperCamelCase__ : Union[str, Any] = 1
UpperCamelCase__ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE )
retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = [[5, 7], [10, 11]]
UpperCamelCase__ : List[str] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
UpperCamelCase__ : Optional[int] = retriever(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE )
self.assertEqual(
len(SCREAMING_SNAKE_CASE ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , SCREAMING_SNAKE_CASE ) # check for doc token related keys in dictionary.
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # sample gaussian noise to begin the loop
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
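# Hedged usage sketch (the checkpoint id is an assumption, not taken from this file):
#
#     from diffusers import ScoreSdeVePipeline
#
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     image = pipe(num_inference_steps=2000).images[0]
#     image.save("sde_ve_sample.png")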
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase_ ( __lowercase : Dict , __lowercase : Dict ) -> Dict:
'''simple docstring'''
assert isinstance(__lowercase , __lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def UpperCAmelCase_ ( __lowercase : int , __lowercase : Optional[int] , __lowercase : List[Any] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = ParquetDatasetReader(__lowercase , cache_dir=__lowercase , keep_in_memory=__lowercase ).read()
_check_parquet_dataset(__lowercase , __lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def UpperCAmelCase_ ( __lowercase : Optional[int] , __lowercase : str , __lowercase : int ) -> int:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(__lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = ParquetDatasetReader(__lowercase , features=__lowercase , cache_dir=__lowercase ).read()
_check_parquet_dataset(__lowercase , __lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def UpperCAmelCase_ ( __lowercase : Any , __lowercase : Optional[Any] , __lowercase : Optional[Any] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = ParquetDatasetReader(__lowercase , cache_dir=__lowercase , split=__lowercase ).read()
_check_parquet_dataset(__lowercase , __lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def UpperCAmelCase_ ( __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : Tuple ) -> Tuple:
'''simple docstring'''
if issubclass(__lowercase , __lowercase ):
_UpperCAmelCase = parquet_path
elif issubclass(__lowercase , __lowercase ):
_UpperCAmelCase = [parquet_path]
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = ParquetDatasetReader(__lowercase , cache_dir=__lowercase ).read()
_check_parquet_dataset(__lowercase , __lowercase )
def UpperCAmelCase_ ( __lowercase : Tuple , __lowercase : Any , __lowercase : Union[str, Any]=("train",) ) -> List[str]:
'''simple docstring'''
assert isinstance(__lowercase , __lowercase )
for split in splits:
_UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def UpperCAmelCase_ ( __lowercase : List[str] , __lowercase : str , __lowercase : Dict ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=__lowercase , keep_in_memory=__lowercase ).read()
_check_parquet_datasetdict(__lowercase , __lowercase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def UpperCAmelCase_ ( __lowercase : Dict , __lowercase : List[str] , __lowercase : Optional[Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(__lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = ParquetDatasetReader({"train": parquet_path} , features=__lowercase , cache_dir=__lowercase ).read()
_check_parquet_datasetdict(__lowercase , __lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def UpperCAmelCase_ ( __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int ) -> List[Any]:
'''simple docstring'''
if split:
_UpperCAmelCase = {split: parquet_path}
else:
_UpperCAmelCase = "train"
_UpperCAmelCase = {"train": parquet_path, "test": parquet_path}
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = ParquetDatasetReader(__lowercase , cache_dir=__lowercase ).read()
_check_parquet_datasetdict(__lowercase , __lowercase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase_ ( __lowercase : int , __lowercase : List[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = ParquetDatasetWriter(__lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
_UpperCAmelCase = pq.ParquetFile(tmp_path / "foo.parquet" )
_UpperCAmelCase = pf.read()
assert dataset.data.table == output_table
def UpperCAmelCase_ ( __lowercase : Optional[int] , __lowercase : Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = str(shared_datadir / "test_image_rgb.jpg" )
_UpperCAmelCase = {"image": [image_path]}
_UpperCAmelCase = Features({"image": Image()} )
_UpperCAmelCase = Dataset.from_dict(__lowercase , features=__lowercase )
_UpperCAmelCase = ParquetDatasetWriter(__lowercase , tmp_path / "foo.parquet" )
assert writer.write() > 0
_UpperCAmelCase = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
_UpperCAmelCase = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__lowercase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCAmelCase_ ( __lowercase : Any , __lowercase : Tuple ) -> Optional[int]:
'''simple docstring'''
assert get_writer_batch_size(__lowercase ) == expected
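# Hedged round-trip sketch (file name and toy data are assumptions): the writer/reader
# pair exercised above is used roughly as
#
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
#     ParquetDatasetWriter(ds, "data.parquet").write()
#     reloaded = ParquetDatasetReader("data.parquet").read()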
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Return the minimum cost of passes needed to travel on every day in ``days``."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
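# Illustrative check (inputs assumed, mirroring the classic ticket-cost example):
# mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) returns 11 -- a 1-day pass for day 1,
# a 7-day pass covering days 4-8, and a 1-day pass for day 20.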
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
UpperCAmelCase__ = 'http://www.mocksite.com/file1.txt'
UpperCAmelCase__ = '"text": ["foo", "foo"]'
UpperCAmelCase__ = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class lowerCAmelCase__ :
__a = 200
__a = {"""Content-Length""": """100"""}
__a = {}
def lowercase ( self : List[str] , **_lowerCamelCase : List[str] ):
return [bytes(_lowerCamelCase , '''utf-8''' )]
def _UpperCAmelCase ( *__lowerCamelCase : List[str] , **__lowerCamelCase : Dict ) -> Dict:
return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str ) -> int:
import requests
monkeypatch.setattr(__lowerCamelCase , '''request''' , __lowerCamelCase )
_snake_case = URL
if issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = url
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [url]
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = {'''train''': url}
_snake_case = '''dummy'''
_snake_case = '''downloads'''
_snake_case = tmp_path
_snake_case = DownloadConfig(
cache_dir=os.path.join(__lowerCamelCase , __lowerCamelCase ) , use_etag=__lowerCamelCase , )
_snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase )
_snake_case = dl_manager.download(__lowerCamelCase )
_snake_case = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [downloaded_paths]
_snake_case = [urls]
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
assert "train" in downloaded_paths.keys()
_snake_case = downloaded_paths.values()
_snake_case = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__lowerCamelCase , __lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_snake_case = Path(__lowerCamelCase )
_snake_case = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_snake_case = downloaded_path.read_text()
assert content == CONTENT
_snake_case = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
_snake_case = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ) -> int:
_snake_case = str(__lowerCamelCase )
if issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = filename
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [filename]
elif issubclass(__lowerCamelCase , __lowerCamelCase ):
_snake_case = {'''train''': filename}
_snake_case = '''dummy'''
_snake_case = xz_file.parent
_snake_case = '''extracted'''
_snake_case = DownloadConfig(
cache_dir=__lowerCamelCase , use_etag=__lowerCamelCase , )
_snake_case = DownloadManager(dataset_name=__lowerCamelCase , download_config=__lowerCamelCase )
_snake_case = dl_manager.extract(__lowerCamelCase )
_snake_case = paths
for extracted_paths in [extracted_paths]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = [extracted_paths]
_snake_case = [paths]
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
assert "train" in extracted_paths.keys()
_snake_case = extracted_paths.values()
_snake_case = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__lowerCamelCase , __lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_snake_case = Path(__lowerCamelCase )
_snake_case = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__lowerCamelCase , etag=__lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_snake_case = extracted_path.read_text()
_snake_case = text_file.read_text()
assert extracted_file_content == expected_file_content
def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ) -> Dict:
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(__lowerCamelCase , start=1 ):
_snake_case = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : str ) -> Dict:
_snake_case = request.getfixturevalue(__lowerCamelCase )
_snake_case = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
_test_jsonl(__lowerCamelCase , __lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[Any] ) -> Tuple:
_snake_case = request.getfixturevalue(__lowerCamelCase )
_snake_case = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__lowerCamelCase ) , start=1 ):
_test_jsonl(__lowerCamelCase , __lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> List[Any]:
_snake_case = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__lowerCamelCase ) , start=1 ):
assert os.path.basename(__lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
def remove_digit(num: int) -> int:
    """Return the largest value obtainable by removing exactly one digit from ``num``."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
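# Illustrative check (input assumed): remove_digit(152) returns 52, the best of the
# candidates obtained by dropping one digit from 152 (52, 12 and 15).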
if __name__ == "__main__":
__import__('''doctest''').testmod()
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCAmelCase_ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCAmelCase_ = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = VOCAB_FILES_NAMES
lowerCamelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : Optional[Any] = BlenderbotSmallTokenizer
def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__="<|endoftext|>" , __magic_name__="<|endoftext|>" , __magic_name__="<|endoftext|>" , __magic_name__=False , __magic_name__=True , **__magic_name__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=__magic_name__ , merges=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , ) , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , **__magic_name__ , )
snake_case_ : Optional[int] = add_prefix_space
def lowerCamelCase (self , __magic_name__ , __magic_name__=None ) -> Any:
'''simple docstring'''
snake_case_ : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> List[int]:
'''simple docstring'''
snake_case_ : List[str] = [self.sep_token_id]
snake_case_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
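# Hedged usage sketch (assuming the class above is the fast BlenderbotSmall tokenizer,
# as the slow counterpart and checkpoint maps imported/declared above suggest):
#
#     tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#     ids = tok("sample text")["input_ids"]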
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
_lowerCAmelCase = logging.getLogger(__name__)
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
if metric == "rouge2":
__UpperCamelCase : Optional[Any] = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
__UpperCamelCase : int = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
__UpperCamelCase : Optional[Any] = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
__UpperCamelCase : List[Any] = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
" function." )
__UpperCamelCase : int = ModelCheckpoint(
dirpath=_UpperCAmelCase , filename=_UpperCAmelCase , monitor=F"val_{metric}" , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
return EarlyStopping(
monitor=F"val_{metric}" , mode="min" if "loss" in metric else "max" , patience=_UpperCAmelCase , verbose=_UpperCAmelCase , )
class A ( pl.Callback ):
'''simple docstring'''
def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> str:
__UpperCamelCase : Optional[int] = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_UpperCAmelCase )
@rank_zero_only
def a_ (self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=True ) -> None:
logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****" )
__UpperCamelCase : Optional[Any] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
__UpperCamelCase : Any = Path(pl_module.hparams.output_dir )
if type_path == "test":
__UpperCamelCase : Tuple = od / "test_results.txt"
__UpperCamelCase : int = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__UpperCamelCase : Optional[int] = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
__UpperCamelCase : Optional[Any] = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=_UpperCAmelCase )
generations_file.parent.mkdir(exist_ok=_UpperCAmelCase )
with open(_UpperCAmelCase , "a+" ) as writer:
for key in sorted(_UpperCAmelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
__UpperCamelCase : Optional[Any] = metrics[key]
if isinstance(_UpperCAmelCase , torch.Tensor ):
__UpperCamelCase : Optional[Any] = val.item()
__UpperCamelCase : List[Any] = f"{key}: {val:.6f}\n"
writer.write(_UpperCAmelCase )
if not save_generations:
return
if "preds" in metrics:
__UpperCamelCase : Optional[int] = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(_UpperCAmelCase )
@rank_zero_only
def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> str:
try:
__UpperCamelCase : str = pl_module.model.model.num_parameters()
except AttributeError:
__UpperCamelCase : Any = pl_module.model.num_parameters()
__UpperCamelCase : List[Any] = count_trainable_parameters(_UpperCAmelCase )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_UpperCAmelCase , _UpperCAmelCase , "test" )
@rank_zero_only
def a_ (self , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
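# Hedged wiring sketch (argument values are assumptions): the checkpoint/early-stopping
# factories and the logging callback defined above are meant to be handed to a Lightning
# trainer, e.g.
#
#     callbacks = [
#         checkpoint_callback,       # from the rouge2/bleu/em/loss checkpoint factory above
#         early_stopping_callback,   # from the EarlyStopping factory above
#         Seq2SeqLoggingCallback(),  # the pl.Callback subclass above (named `A` in this file)
#     ]
#     trainer = pl.Trainer(callbacks=callbacks)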
from __future__ import annotations
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    '''simple docstring'''
    if location := find_empty_location(grid):
        row, column = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
return None
def print_solution(grid: Matrix) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
            print(cell, end=' ')
print()
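# Note: `sudoku` above is plain backtracking -- it fills the first empty cell with the
# smallest safe digit, recurses, and resets the cell to 0 when a branch dead-ends.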
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = """▁"""
snake_case_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
snake_case_ = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
snake_case_ = {
"""facebook/xglm-564M""": 2048,
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self :str , lowercase_ :Optional[Any] , lowercase_ :Tuple="<s>" , lowercase_ :Any="</s>" , lowercase_ :str="</s>" , lowercase_ :Any="<s>" , lowercase_ :Optional[Any]="<unk>" , lowercase_ :Optional[int]="<pad>" , lowercase_ :Optional[Dict[str, Any]] = None , **lowercase_ :int , ) -> None:
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCAmelCase = 7
UpperCAmelCase = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
UpperCAmelCase = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase_ ) )
UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCAmelCase = len(self.sp_model )
UpperCAmelCase = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(lowercase_ )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :Any ) -> int:
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self :List[str] , lowercase_ :Dict ) -> Optional[Any]:
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCAmelCase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase__ ( self :Tuple , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None , lowercase_ :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase_ ))
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ ))
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase__ ( self :Any ) -> Tuple:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Dict:
UpperCAmelCase = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self :Any , lowercase_ :str ) -> List[str]:
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Tuple ) -> Dict:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase = self.sp_model.PieceToId(lowercase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase__ ( self :Tuple , lowercase_ :Tuple ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase__ ( self :Dict , lowercase_ :List[str] ) -> List[str]:
UpperCAmelCase = ''.join(lowercase_ ).replace(lowercase_ , ' ' ).strip()
return out_string
def UpperCAmelCase__ ( self :List[str] , lowercase_ :str , lowercase_ :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , 'wb' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
"""simple docstring"""
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
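# Illustrative check (computed from the recurrence above): catalan(5) returns 14,
# matching the Catalan sequence 1, 1, 2, 5, 14, ...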
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def lowerCAmelCase ( self : int ) ->Tuple:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
__lowercase: Union[str, Any] = PoolFormerImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
        self.image_processor_tester = PoolFormerImageProcessingTester(self)
@property
def lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase_ , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """crop_pct""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase_ , """image_std""" ) )
def lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase ( self : Union[str, Any] ) ->Any:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(lowerCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 347 |
'''simple docstring'''
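# This appears to correspond to Project Euler problem 135 ("Same differences"): count the
# values of n below the limit for which n = x**2 - y**2 - z**2 has exactly ten solutions
# with x, y, z positive integers in arithmetic progression.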
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 42 | 0 |
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
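# Importing from this legacy module path therefore still works: it re-exports
# FlaxStableDiffusionControlNetPipeline from its new location while emitting the
# deprecation warning configured above.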
| 369 |
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm for the greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    """Print a few example GCD computations."""
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 61 | 0 |
'''simple docstring'''
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Real (active) power for a given apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power for a given apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
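    # Example (illustrative values): real_power(100, 0.9) -> 90.0 and
    # reactive_power(100, 0.9) -> 100 * sqrt(1 - 0.81) ≈ 43.589.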
| 254 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
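# This script converts a native T5X (JAX) checkpoint into a PyTorch checkpoint for the
# UMT5/MT5 model family: it looks up attention, MLP, layer-norm and relative-position
# weights per layer and reassembles them into a Hugging Face state dict.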
def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict ):
"""simple docstring"""
return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def lowercase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any="attention" ):
"""simple docstring"""
__UpperCAmelCase : int = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
__UpperCAmelCase : Tuple = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
__UpperCAmelCase : Tuple = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
__UpperCAmelCase : List[str] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
__UpperCAmelCase : List[str] = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
__UpperCAmelCase : List[str] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
__UpperCAmelCase : Optional[Any] = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
__UpperCAmelCase : Dict = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any]=False ):
"""simple docstring"""
if split_mlp_wi:
__UpperCAmelCase : List[str] = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
__UpperCAmelCase : Union[str, Any] = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
__UpperCAmelCase : Dict = (wi_a, wi_a)
else:
__UpperCAmelCase : Union[str, Any] = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
__UpperCAmelCase : Tuple = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def lowercase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def lowercase_ ( lowerCAmelCase__ : dict , *, lowerCAmelCase__ : int , lowerCAmelCase__ : bool , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
__UpperCAmelCase : Tuple = traverse_util.flatten_dict(variables["""target"""] )
__UpperCAmelCase : Union[str, Any] = {"""/""".join(lowerCAmelCase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__UpperCAmelCase : Any = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowerCAmelCase__ )
__UpperCAmelCase : Any = collections.OrderedDict()
# Shared embeddings.
__UpperCAmelCase : int = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowerCAmelCase__ ):
# Block i, layer 0 (Self Attention).
__UpperCAmelCase : Union[str, Any] = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """pre_attention_layer_norm""" )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """attention""" )
__UpperCAmelCase : Any = layer_norm
__UpperCAmelCase : List[Any] = k.T
__UpperCAmelCase : Optional[int] = o.T
__UpperCAmelCase : str = q.T
__UpperCAmelCase : Any = v.T
# Block i, layer 1 (MLP).
__UpperCAmelCase : List[str] = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , """pre_mlp_layer_norm""" )
__UpperCAmelCase , __UpperCAmelCase : int = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" , lowerCAmelCase__ )
__UpperCAmelCase : Optional[int] = layer_norm
if split_mlp_wi:
__UpperCAmelCase : List[Any] = wi[0].T
__UpperCAmelCase : Any = wi[1].T
else:
__UpperCAmelCase : Tuple = wi.T
__UpperCAmelCase : Tuple = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__UpperCAmelCase : Dict = tax_relpos_bias_lookup(
lowerCAmelCase__ , lowerCAmelCase__ , """encoder""" ).T
__UpperCAmelCase : Optional[int] = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
__UpperCAmelCase : Any = tax_relpos_bias_lookup(
lowerCAmelCase__ , 0 , """encoder""" ).T
__UpperCAmelCase : Dict = tax_relpos_bias_lookup(
lowerCAmelCase__ , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase__ ):
# Block i, layer 0 (Self Attention).
__UpperCAmelCase : str = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_self_attention_layer_norm""" )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """self_attention""" )
__UpperCAmelCase : int = layer_norm
__UpperCAmelCase : Optional[Any] = k.T
__UpperCAmelCase : Dict = o.T
__UpperCAmelCase : int = q.T
__UpperCAmelCase : List[str] = v.T
# Block i, layer 1 (Cross Attention).
__UpperCAmelCase : Any = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = tax_attention_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """encoder_decoder_attention""" )
__UpperCAmelCase : Union[str, Any] = layer_norm
__UpperCAmelCase : List[Any] = k.T
__UpperCAmelCase : int = o.T
__UpperCAmelCase : Optional[int] = q.T
__UpperCAmelCase : Optional[int] = v.T
# Block i, layer 2 (MLP).
__UpperCAmelCase : Tuple = tax_layer_norm_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , """pre_mlp_layer_norm""" )
__UpperCAmelCase , __UpperCAmelCase : Any = tax_mlp_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" , lowerCAmelCase__ )
__UpperCAmelCase : Optional[int] = layer_norm
if split_mlp_wi:
__UpperCAmelCase : Optional[Any] = wi[0].T
__UpperCAmelCase : Optional[int] = wi[1].T
else:
__UpperCAmelCase : str = wi.T
__UpperCAmelCase : int = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__UpperCAmelCase : Union[str, Any] = tax_relpos_bias_lookup(lowerCAmelCase__ , lowerCAmelCase__ , """decoder""" ).T
__UpperCAmelCase : Dict = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__UpperCAmelCase : List[str] = old["""decoder/logits_dense/kernel"""].T
return new
def lowercase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : bool ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__UpperCAmelCase : str = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__UpperCAmelCase : List[str] = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
__UpperCAmelCase : Union[str, Any] = state_dict["""shared.weight"""]
return state_dict
def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any ):
"""simple docstring"""
    __UpperCAmelCase : Tuple = checkpoints.load_t5x_checkpoint(lowerCAmelCase__ )
__UpperCAmelCase : Any = convert_tax_to_pytorch(
lowerCAmelCase__ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase__ , scalable_attention=lowerCAmelCase__ )
__UpperCAmelCase : str = make_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , ):
"""simple docstring"""
    __UpperCAmelCase : Optional[int] = MT5Config.from_json_file(lowerCAmelCase__ )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
        __UpperCAmelCase : List[Any] = UMT5EncoderModel(lowerCAmelCase__ )
else:
        __UpperCAmelCase : Dict = UMT5ForConditionalGeneration(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowerCAmelCase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase__ )
print("""Done""" )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
_UpperCamelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 254 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow and fast RoBERTa tokenizers."""
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"""cls_token""": """<s>"""}
def UpperCAmelCase ( self ) -> Union[str, Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case : List[str] = dict(zip(A , range(len(A ) ) ) )
snake_case : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case : Union[str, Any] = {"""unk_token""": """<unk>"""}
snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A ) )
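        # The tiny vocabulary and merges written above define a toy byte-level BPE tokenizer,
        # which is all the tests below need to exercise RobertaTokenizer and its fast variant.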
def UpperCAmelCase ( self , **A ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase ( self , **A ) -> str:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
snake_case : Optional[Any] = """lower newer"""
snake_case : List[Any] = """lower newer"""
return input_text, output_text
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case : Optional[Any] = """lower newer"""
snake_case : List[str] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case : List[str] = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A , A )
snake_case : Dict = tokens + [tokenizer.unk_token]
snake_case : Dict = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def UpperCAmelCase ( self ) -> Any:
snake_case : Any = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=A ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Optional[int] = self.tokenizer_class.from_pretrained("""roberta-base""" )
snake_case : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=A )
snake_case : int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A )
snake_case : Optional[int] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=A , add_prefix_space=A )
snake_case : Optional[int] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A )
snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A )
snake_case : List[str] = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCAmelCase ( self ) -> Dict:
snake_case : int = self.get_tokenizer()
snake_case : str = """Encode this sequence."""
snake_case : Optional[int] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
snake_case : Optional[Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
snake_case : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A , A )
snake_case : int = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
snake_case : List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A , A )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
snake_case : List[str] = tokenizer.encode(A , add_special_tokens=A )
snake_case : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A , A )
# Testing spaces after special tokens
snake_case : Union[str, Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space
snake_case : List[str] = tokenizer.convert_tokens_to_ids(A )
snake_case : Optional[Any] = """Encode <mask> sequence"""
snake_case : List[Any] = """Encode <mask>sequence"""
snake_case : Union[str, Any] = tokenizer.encode(A )
snake_case : List[str] = encoded.index(A )
snake_case : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A , A )
snake_case : str = tokenizer.encode(A )
snake_case : Tuple = encoded.index(A )
snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A , A )
def UpperCAmelCase ( self ) -> int:
pass
def UpperCAmelCase ( self ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : Tuple = self.rust_tokenizer_class.from_pretrained(A , **A )
snake_case : Optional[Any] = self.tokenizer_class.from_pretrained(A , **A )
snake_case : Union[str, Any] = """A, <mask> AllenNLP sentence."""
snake_case : Optional[int] = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
snake_case : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
snake_case : str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
snake_case : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def UpperCAmelCase ( self ) -> Optional[int]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""trim_offsets"""] , A )
def UpperCAmelCase ( self ) -> List[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : Tuple = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case : int = f"""{text_of_1_token} {text_of_1_token}"""
snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : Any = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : int = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : Optional[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
snake_case : Union[str, Any] = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : List[str] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , )
snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : List[str] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
snake_case : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
snake_case : List[str] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
| 176 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    """Configuration class for BART models."""
    model_type = """bart"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , A=5_0_2_6_5 , A=1_0_2_4 , A=1_2 , A=4_0_9_6 , A=1_6 , A=1_2 , A=4_0_9_6 , A=1_6 , A=0.0 , A=0.0 , A="gelu" , A=1_0_2_4 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=0.0 , A=False , A=True , A=3 , A=1 , A=0 , A=2 , A=True , A=2 , A=2 , **A , ) -> Any:
snake_case : Optional[int] = vocab_size
snake_case : Union[str, Any] = max_position_embeddings
snake_case : List[str] = d_model
snake_case : List[Any] = encoder_ffn_dim
snake_case : Optional[Any] = encoder_layers
snake_case : Union[str, Any] = encoder_attention_heads
snake_case : str = decoder_ffn_dim
snake_case : Union[str, Any] = decoder_layers
snake_case : Any = decoder_attention_heads
snake_case : Union[str, Any] = dropout
snake_case : List[str] = attention_dropout
snake_case : List[Any] = activation_dropout
snake_case : Optional[int] = activation_function
snake_case : Union[str, Any] = init_std
snake_case : List[str] = encoder_layerdrop
snake_case : int = decoder_layerdrop
snake_case : str = classifier_dropout
snake_case : List[str] = use_cache
snake_case : Tuple = encoder_layers
snake_case : int = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=A , pad_token_id=A , bos_token_id=A , eos_token_id=A , is_encoder_decoder=A , decoder_start_token_id=A , forced_eos_token_id=A , **A , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , A ):
snake_case : Any = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for BART."""
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
snake_case : Optional[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
snake_case : Tuple = {0: """batch"""}
snake_case : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
snake_case : Union[str, Any] = {0: """batch""", 1: """decoder_sequence"""}
snake_case : Any = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(A , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
snake_case , snake_case : List[Any] = self.num_layers
for i in range(A ):
snake_case : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
snake_case : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
snake_case : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
snake_case : Any = super().outputs
else:
snake_case : Any = super(A , self ).outputs
if self.use_past:
snake_case , snake_case : Any = self.num_layers
for i in range(A ):
snake_case : Any = {0: """batch""", 2: """past_sequence + sequence"""}
snake_case : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def UpperCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]:
snake_case : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A , A , A , A , A )
# Generate decoder inputs
snake_case : Any = seq_length if not self.use_past else 1
snake_case : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A , A , A , A , A )
snake_case : Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
snake_case : List[str] = dict(**A , **A )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case , snake_case : Optional[int] = common_inputs["""input_ids"""].shape
snake_case : Any = common_inputs["""decoder_input_ids"""].shape[1]
snake_case , snake_case : Optional[Any] = self.num_attention_heads
snake_case : Optional[int] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case : Any = decoder_seq_length + 3
snake_case : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case : str = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(A , A )] , dim=1 )
snake_case : str = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case , snake_case : Any = self.num_layers
snake_case : List[str] = min(A , A )
snake_case : Dict = max(A , A ) - min_num_layers
snake_case : List[str] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(A ):
common_inputs["past_key_values"].append(
(
torch.zeros(A ),
torch.zeros(A ),
torch.zeros(A ),
torch.zeros(A ),
) )
# TODO: test this.
snake_case : Tuple = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(A , A ):
common_inputs["past_key_values"].append((torch.zeros(A ), torch.zeros(A )) )
return common_inputs
def UpperCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]:
snake_case : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A , A , A , A , A )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case , snake_case : str = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
snake_case : Optional[int] = seqlen + 2
snake_case , snake_case : Tuple = self.num_layers
snake_case , snake_case : Optional[Any] = self.num_attention_heads
snake_case : Union[str, Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case : Optional[Any] = common_inputs["""attention_mask"""].dtype
snake_case : int = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(A , A , dtype=A )] , dim=1 )
snake_case : Union[str, Any] = [
(torch.zeros(A ), torch.zeros(A )) for _ in range(A )
]
return common_inputs
def UpperCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case : int = compute_effective_axis_dimension(
A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case : int = tokenizer.num_special_tokens_to_add(A )
snake_case : Tuple = compute_effective_axis_dimension(
A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A )
# Generate dummy inputs according to compute batch and sequence
snake_case : int = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case : str = dict(tokenizer(A , return_tensors=A ) )
return common_inputs
def UpperCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
snake_case : Optional[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
A , batch_size=A , seq_length=A , is_pair=A , framework=A )
elif self.task == "causal-lm":
snake_case : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
A , batch_size=A , seq_length=A , is_pair=A , framework=A )
else:
snake_case : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A , batch_size=A , seq_length=A , is_pair=A , framework=A )
return common_inputs
def UpperCAmelCase ( self , A , A , A , A ) -> Union[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
snake_case : Optional[Any] = super()._flatten_past_key_values_(A , A , A , A )
else:
snake_case : Union[str, Any] = super(A , self )._flatten_past_key_values_(
A , A , A , A )
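# A minimal usage sketch (assumed, not part of the original file): the ONNX config wraps a
# BartConfig and produces dummy inputs for export, e.g.
#   onnx_config = BartOnnxConfig(BartConfig(), task="default")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)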
| 176 | 1 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
            '''instead.''',
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
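# Usage is unchanged from the regular Trainer: instantiating SageMakerTrainer only adds the
# deprecation warning above before deferring to Trainer.__init__.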
| 288 |
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 288 | 1 |
"""simple docstring"""
from math import ceil
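# Project Euler problem 28: sum of the numbers on both diagonals of an n x n number spiral
# (n odd); for each ring i the loop adds the four corner values, 4 * odd**2 - 6 * even.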
def solution(n: int = 1_001) -> int:
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 27 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_bigcode'''] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
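# With the lazy module installed in sys.modules, `from transformers.models.gpt_bigcode import
# GPTBigCodeModel` only triggers the heavy torch-dependent import the first time the symbol
# is actually accessed.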
| 27 | 1 |