class Graph:
    def __init__(self) -> None:
        # adjacency list: vertex -> list of adjacent vertices
        self.vertex = {}

    def print_graph(self) -> None:
        # print each vertex followed by its adjacency list
        for i in self.vertex:
            print(i, " -> ", " -> ".join(str(j) for j in self.vertex[i]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if the source vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function for every unvisited vertex
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
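# For comparison, a minimal iterative DFS with an explicit stack (an illustrative
# sketch, not part of the original class; `graph` is an adjacency dict like
# `Graph.vertex` above):
def dfs_iterative(graph: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        # push neighbours in reverse so they are visited in insertion order
        stack.extend(reversed(graph.get(vertex, [])))
    return order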
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images in channels-last format."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, BertTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
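# Hedged usage sketch of the processor under test (assumes network access to the
# public "Salesforce/instructblip-flan-t5-xl" checkpoint; the image is synthetic):
if __name__ == "__main__":
    from PIL import Image as PILImage

    from transformers import InstructBlipProcessor

    processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
    image = PILImage.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
    print(list(inputs.keys()))  # input_ids, attention_mask, qformer_*, pixel_values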
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
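# Minimal usage sketch (assumes `transformers` is installed); instantiating the
# config does not download any weights:
if __name__ == "__main__":
    configuration = BertConfig(num_hidden_layers=6)
    print(configuration.hidden_size)  # 768, the default
    onnx_config = BertOnnxConfig(configuration)
    print(dict(onnx_config.inputs))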
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """
    Project Euler problem 33: return the denominator, in lowest common terms, of
    the product of the four non-trivial digit-cancelling fractions.
    """
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
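# Quick self-check, derived from the definitions above: the four non-trivial
# digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98, whose product
# reduces to 1/100, so solution() returns 100.
assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
assert solution() == 100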
def matching_min_vertex_cover(graph: dict) -> set:
    """
    Greedy 2-approximation of a minimum vertex cover based on a maximal matching.
    """
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)

    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices and then
    # remove all edges adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) pairs in the adjacency dict."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
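# Worked example (hedged, not from the original file): on a path graph a-b-c the
# matching heuristic picks one edge and covers with both of its endpoints, so the
# cover has size 2 while the optimum {b} has size 1, illustrating the factor-2 bound.
def _demo_two_approximation() -> None:
    path_graph = {"a": ["b"], "b": ["a", "c"], "c": ["b"]}
    cover = matching_min_vertex_cover(path_graph)
    assert len(cover) <= 2 * 1  # optimum cover {"b"} has size 1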
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1,
        activation: Optional[str] = "relu", **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID",
            groups=groups, use_bias=False, name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
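# Sanity-check sketch for the padding trick above (hedged; an illustrative helper,
# not part of the original file): ZeroPadding2D(kernel_size // 2) followed by a
# "VALID" convolution preserves the spatial size exactly like padding="SAME" does
# for odd kernel sizes and stride 1.
def _check_padding_equivalence(kernel_size: int = 3) -> bool:
    x = tf.random.normal((1, 8, 8, 3))
    same = tf.keras.layers.Conv2D(4, kernel_size, padding="same")
    padded_valid = tf.keras.Sequential(
        [tf.keras.layers.ZeroPadding2D(kernel_size // 2), tf.keras.layers.Conv2D(4, kernel_size, padding="valid")]
    )
    return same(x).shape == padded_valid(x).shape  # True: both (1, 8, 8, 4)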
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet embeddings (stem), composed of a single aggressive convolution."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder"
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut, used to project the residual features to the correct size
    and, if needed, to downsample the input with `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation layer, which re-weights channels by pooled attention."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's X layer: a ResNet-style bottleneck block with grouped convolutions."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's Y layer: an X layer with Squeeze-and-Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage, composed of stacked X or Y layers."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
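# Hedged usage sketch (assumes TF weights are published for the checkpoint and
# that pillow/network access are available; the image is synthetic):
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    from transformers import AutoImageProcessor

    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = image_processor(image, return_tensors="tf")
    logits = model(**inputs).logits
    print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])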
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
    import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None


def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[str] ) -> Union[str, Any]:
import pyspark
def get_arrow_batch_size(lowerCAmelCase_ : str ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
UpperCAmelCase_ : List[str] = self.df.count()
UpperCAmelCase_ : Tuple = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCAmelCase_ : Optional[Any] = (
self.df.limit(lowerCAmelCase_ )
.repartition(1 )
.mapInArrow(lowerCAmelCase_ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCAmelCase_ : Tuple = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCAmelCase_ : Union[str, Any] = min(lowerCAmelCase_ , int(approx_total_size / max_shard_size ) )
UpperCAmelCase_ : Tuple = self.df.repartition(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
import pyspark
UpperCAmelCase_ : List[str] = ParquetWriter if file_format == "parquet" else ArrowWriter
UpperCAmelCase_ : str = os.path.join(self._working_dir , os.path.basename(lowerCAmelCase_ ) ) if self._working_dir else fpath
UpperCAmelCase_ : Optional[int] = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCAmelCase_ : Any = self.config.features
UpperCAmelCase_ : int = self._writer_batch_size
UpperCAmelCase_ : str = self._fs.storage_options
def write_arrow(lowerCAmelCase_ : List[Any] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCAmelCase_ : str = pyspark.TaskContext().taskAttemptId()
UpperCAmelCase_ : List[str] = next(lowerCAmelCase_ , lowerCAmelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : List[str] = writer_class(
features=lowerCAmelCase_ , path=working_fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , writer_batch_size=lowerCAmelCase_ , storage_options=lowerCAmelCase_ , embed_local_files=lowerCAmelCase_ , )
UpperCAmelCase_ : int = pa.Table.from_batches([first_batch] )
writer.write_table(lowerCAmelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
UpperCAmelCase_ : Tuple = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , writer_batch_size=lowerCAmelCase_ , storage_options=lowerCAmelCase_ , embed_local_files=lowerCAmelCase_ , )
UpperCAmelCase_ : int = pa.Table.from_batches([batch] )
writer.write_table(lowerCAmelCase_ )
if writer._num_bytes > 0:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowerCAmelCase_ ) ):
UpperCAmelCase_ : Optional[int] = os.path.join(os.path.dirname(lowerCAmelCase_ ) , os.path.basename(lowerCAmelCase_ ) )
shutil.move(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = (
self.df.mapInArrow(lowerCAmelCase_ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : "datasets.SplitGenerator" , lowerCAmelCase_ : str = "arrow" , lowerCAmelCase_ : Optional[Union[str, int]] = None , lowerCAmelCase_ : Optional[int] = None , **lowerCAmelCase_ : Union[str, Any] , ) -> int:
self._validate_cache_dir()
UpperCAmelCase_ : Tuple = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = not is_remote_filesystem(self._fs )
UpperCAmelCase_ : int = os.path.join if is_local else posixpath.join
UpperCAmelCase_ : str = "-TTTTT-SSSSS-of-NNNNN"
UpperCAmelCase_ : Any = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
UpperCAmelCase_ : Any = path_join(self._output_dir , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Dict = []
for task_id, content in self._prepare_split_single(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Tuple = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = total_num_examples
UpperCAmelCase_ : List[Any] = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
UpperCAmelCase_ : Any = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCAmelCase_ : List[str] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , ):
rename(
lowerCAmelCase_ , fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , f"""{global_shard_id:05d}""" ).replace("NNNNN" , f"""{total_shards:05d}""" ) , )
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : int = 0
for i in range(len(lowerCAmelCase_ ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Any = task_id_and_num_shards[i]
for shard_id in range(lowerCAmelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCAmelCase_ , len(lowerCAmelCase_ ) ).map(lambda lowerCAmelCase_ : _rename_shard(*lowerCAmelCase_ ) ).collect()
else:
# don't use any pattern
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : str = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace(lowerCAmelCase_ , "" ) , )
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : "datasets.SplitGenerator" , ) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df )
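# Hedged usage sketch (assumes a running SparkSession; `Dataset.from_spark` is the
# public entry point that dispatches to the builder above):
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    from datasets import Dataset

    spark = SparkSession.builder.master("local[*]").getOrCreate()
    df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
    ds = Dataset.from_spark(df)
    print(ds[0])  # {'text': 'hello'}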
from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Project Euler 34: sum all numbers equal to the sum of the factorials of their digits."""
    # 7 * 9! is a safe upper bound: an 8-digit number can sum to at most 8 * 9!, which has only 7 digits.
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
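# Quick self-check, derived from the definitions above: 145 and 40585 are the only
# two such numbers (1 and 2 are excluded because they are not sums), so
# solution() == 145 + 40585 == 40730.
assert sum_of_digit_factorial(145) == 145  # 1! + 4! + 5! = 1 + 24 + 120
assert sum_of_digit_factorial(40585) == 40585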
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using VQ Diffusion.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` so that, per column vector, only the most probable classes whose cumulative probability
        stays below `truncation_rate` are kept; the rest are set to log(0), i.e. -inf.
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
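# Standalone numeric illustration of the truncation logic above (hedged, made-up
# probabilities; mirrors the sort / cumsum / shift steps in `truncate`):
if __name__ == "__main__":
    p = torch.tensor([0.5, 0.3, 0.15, 0.05])
    sorted_p, _ = torch.sort(p, descending=True)
    keep = sorted_p.cumsum(0) < 0.8  # [True, False, False, False]
    keep = torch.cat((torch.tensor([True]), keep))[:-1]  # always keep the largest
    print(sorted_p[keep])  # tensor([0.5000, 0.3000]) -- only the top-p mass survives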
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
    import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a
    specified text prompt.
    """

    # Prefix text to help Transformer-XL and XLNet with short prompts, as proposed by Aman Rusia
    # in https://github.com/rusiaaman/XLNet-gen#methodology
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
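    # Hedged usage sketch (pipeline and checkpoint names here are placeholders, not taken from
    # this file): ReturnType.FULL_TEXT re-attaches the prompt to the decoded continuation,
    # while ReturnType.NEW_TEXT keeps only the continuation.
    #
    #   generator = pipeline("text-generation", model="gpt2")
    #   generator("Hello", return_full_text=True)    # -> [{"generated_text": "Hello, world ..."}]
    #   generator("Hello", return_full_text=False)   # -> [{"generated_text": ", world ..."}]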
| 272 | 1 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length: int):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
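# Hedged sketch of the `get_duration` decorator imported from the local `utils` module above.
# Its real implementation is not shown in this file, so the minimal reimplementation below is
# an assumption, kept under a different name to avoid shadowing the imported helper.
import functools
import timeit


def _get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()  # wall-clock start
        func(*args, **kwargs)
        return timeit.default_timer() - start  # elapsed seconds, as stored in `times`

    return wrapper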
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 28 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
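# Minimal illustration of the fused-QKV split performed above, on toy shapes (plain torch,
# no model involved; the sizes are made up): timm stacks q, k and v along dim 0.
#
#   hidden_size = 4
#   in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
#   q = in_proj_weight[:hidden_size, :]
#   k = in_proj_weight[hidden_size : hidden_size * 2, :]
#   v = in_proj_weight[-hidden_size:, :]
#   assert torch.equal(torch.cat([q, k, v]), in_proj_weight)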
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
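# Tiny illustration of `rename_key` (toy values only): it moves a value to a new key in place.
#
#   d = {"old": 1}
#   rename_key(d, "old", "new")
#   assert d == {"new": 1}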
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 28 | 1 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"
    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)
    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 51 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]])  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],])  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]])  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]])  # noqa: E231
    # fmt: on

    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)

        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 300 | 0 |
def binary_and(a: int, b: int) -> str:
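    """
    Return the bitwise AND of two non-negative integers as a zero-padded binary string.

    The doctests below are illustrative examples added so that `doctest.testmod()` in the
    main block has something to verify:

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    >>> binary_and(1, 1)
    '0b1'
    """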
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod() | 325 |
def binomial_coefficient(n: int, r: int) -> int:
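    """
    Compute C(n, r) with a rolling one-dimensional Pascal's triangle row, in O(n * r) time
    and O(r) extra space. Illustrative worked example: C(10, 5) = 10! / (5! * 5!) = 252.

    >>> binomial_coefficient(10, 5)
    252
    """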
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # compute the current row from the previous row
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5)) | 325 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
a__ : Optional[int] = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 54 |
"""simple docstring"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0

    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
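# Illustrative round trip (values worked out by hand, not read from the data file):
# parse_roman_numerals("IIIIIIIIIIIIIIII") == 16 and generate_roman_numerals(16) == "XVI",
# a saving of 16 - 3 = 13 characters, which is exactly the quantity `solution` accumulates
# for each line of the input file.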
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 61 | 0 |
"""simple docstring"""
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> bool:
SCREAMING_SNAKE_CASE = n ** (1 / 3)
return (val * val * val) == n
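# Quick check of the rounding fix above (illustrative): without round(), 27 ** (1 / 3)
# evaluates to 3.0000000000000004, which cubes back to 27.000000000000004, so the equality
# test would wrongly report False for a true perfect cube.
#
#   >>> perfect_cube(27)
#   True
#   >>> perfect_cube(4)
#   False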
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 38 |
"""simple docstring"""
import operator as op
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> int:
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = lambda SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int(x / y ) # noqa: E731 integer division operation
SCREAMING_SNAKE_CASE = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            # evaluate the 2 values popped from stack & push the result back
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])
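# Illustrative trace (worked out by hand): for solve("5 6 9 * +".split(" ")) the loop pushes
# 5, 6 and 9, pops 9 and 6 to push 6 * 9 = 54, then pops 54 and 5 to push 5 + 54, so the
# function returns 59.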
if __name__ == "__main__":
__UpperCamelCase = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
| 38 | 1 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
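# Hedged usage sketch (the model and inputs are placeholders, not part of this test file):
# a StoppingCriteriaList built like the ones above can be passed straight to `generate`,
# and generation halts as soon as any criterion fires.
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=1.0)])
#   output_ids = model.generate(input_ids, stopping_criteria=criteria)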
| 273 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
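# Worked example of the "SAME" padding above (toy numbers): a 7x7 input with stride 2 and a
# 3x3 kernel gives in_height % stride_height == 1, so pad_along_height = max(3 - 1, 0) = 2,
# split as pad_top = 1 and pad_bottom = 1; the padded 9x9 input then yields the
# ceil(7 / 2) = 4 output size that TensorFlow's "SAME" convolution produces.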
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: bool = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states,
        )
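# A self-contained sketch of the depthwise-separable pattern the 13-block loop above
# builds (class name and sizes here are illustrative, not part of the model file):
import torch
from torch import nn


class DepthwiseSeparableConv(nn.Module):
    """One MobileNet-style block: per-channel 3x3 conv, then a 1x1 channel-mixing conv."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        # groups=in_channels makes the 3x3 convolution act on each channel independently
        self.depthwise = nn.Conv2d(in_channels, in_channels, 3, stride=stride, padding=1, groups=in_channels)
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.pointwise(self.depthwise(x))


if __name__ == "__main__":
    block = DepthwiseSeparableConv(32, 64, stride=2)
    print(block(torch.randn(1, 32, 224, 224)).shape)  # torch.Size([1, 64, 112, 112])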
| 79 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
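# A minimal sketch of the same set_defaults(func=...) dispatch pattern, independent of
# accelerate (the `hello` command and its flag are hypothetical):
from argparse import ArgumentParser


def hello_command_parser(subparsers):
    parser = subparsers.add_parser("hello")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))


if __name__ == "__main__":
    parser = ArgumentParser("demo", usage="demo <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="demo command helpers")
    hello_command_parser(subparsers)
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    args.func(args)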
| 79 | 1 |
"""simple docstring"""
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp: str, tp: str) -> None:
    print("moving disk from", fp, "to", tp)


def main() -> None:
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
| 126 |
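# Sanity check on the Hanoi recursion above: moving an n-disk tower always takes
# 2**n - 1 disk moves (helper name is illustrative):
def count_moves(height: int) -> int:
    if height < 1:
        return 0
    # two recursive transfers of n-1 disks plus one move of the largest disk
    return 2 * count_moves(height - 1) + 1


assert count_moves(3) == 2**3 - 1 == 7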
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
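# Sketch of how the IGNORE_RESULT flag registered above is meant to be used: with the
# custom checker installed, the output of a flagged doctest line is not compared.
def add(a, b):
    """
    >>> add(1, 2)  # doctest: +IGNORE_RESULT
    'output is ignored under the custom checker'
    """
    return a + b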
| 56 | 0 |
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
| 363 |
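# Worked example of the chains being counted above: starting from 69 there are exactly
# five distinct terms before the sequence loops (dfs mirrors digit_factorial_sum):
from math import factorial


def dfs(n: int) -> int:
    return sum(factorial(int(d)) for d in str(n))


chain, element = [], 69
while element not in chain:
    chain.append(element)
    element = dfs(element)
print(chain)  # [69, 363600, 1454, 169, 363601]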
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is the empirically determined Harris constant, usually 0.04 or 0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 210 | 0 |
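# OpenCV ships the same detector built in; a sketch for cross-checking the hand-rolled
# loop above (the image path is a placeholder):
import cv2
import numpy as np

gray = cv2.imread("path_to_image", 0)
response = cv2.cornerHarris(np.float32(gray), blockSize=3, ksize=3, k=0.04)
print(int((response > 0.01 * response.max()).sum()), "corner pixels")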
"""simple docstring"""
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Finds the Maclaurin approximation of sin(theta)."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    # reduce theta into [-2*pi, 2*pi] before summing the series
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Finds the Maclaurin approximation of cos(theta)."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
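# Quick consistency check of the truncated series against the library functions
# (tolerance is illustrative; 30 terms after range reduction is far more than enough):
from math import cos, sin

for theta in (0.5, 3.0, 10.0):
    assert abs(maclaurin_sin(theta) - sin(theta)) < 1e-9
    assert abs(maclaurin_cos(theta) - cos(theta)) < 1e-9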
| 290 | """simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transforms a snake_case string to camelCase (or PascalCase if use_pascal is True)."""
    if not isinstance(input_str, str):
        raise ValueError(f"Expected string as input, found {type(input_str)}")
    if not isinstance(use_pascal, bool):
        raise ValueError(f"Expected boolean as use_pascal parameter, found {type(use_pascal)}")

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
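# The same transform as a one-line regex, for comparison (behaviour differs on edge
# cases such as leading or doubled underscores; illustrative only):
import re


def snake_to_camel_re(s: str) -> str:
    return re.sub(r"_(\w)", lambda m: m.group(1).upper(), s)


assert snake_to_camel_re("some_random_string") == "someRandomString"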
| 290 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCAmelCase ( UpperCamelCase__ ):
def __init__( self :Tuple )-> List[Any]:
# test for the above condition
self.test()
def UpperCAmelCase_ ( self :Dict )-> Any:
A__ = 0
A__ = False
while not completed:
if counter == 1:
self.reset()
A__ = self.advance()
if not self.does_advance(lowercase_ ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
A__, A__, A__ = self.update(lowercase_ )
counter += 1
if counter > 1_00_00:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def UpperCAmelCase_ ( self :str )-> Optional[int]:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCAmelCase_ ( self :str , lowercase_ :int )-> Optional[Any]:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :int )-> Optional[Any]:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCAmelCase_ ( self :str )-> List[str]:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCAmelCase_ ( self :Any )-> List[str]:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
@abstractmethod
def UpperCAmelCase_ ( self :Dict , lowercase_ :List[Any]=False )-> List[Any]:
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )
class UpperCAmelCase ( UpperCamelCase__ ):
def __init__( self :Dict , lowercase_ :List[int] )-> Optional[Any]:
super(lowercase_ , self ).__init__()
if not isinstance(lowercase_ , lowercase_ ) or len(lowercase_ ) == 0:
raise ValueError(F"`token_ids` has to be a non-empty list, but is {token_ids}." )
if any((not isinstance(lowercase_ , lowercase_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." )
A__ = token_ids
A__ = len(self.token_ids )
A__ = -1 # the index of the currently fulfilled step
A__ = False
def UpperCAmelCase_ ( self :int )-> int:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self :str , lowercase_ :int )-> int:
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(lowercase_ )}" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :int )-> Dict:
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(lowercase_ )}" )
A__ = False
A__ = False
A__ = False
if self.does_advance(lowercase_ ):
self.fulfilled_idx += 1
A__ = True
if self.fulfilled_idx == (self.seqlen - 1):
A__ = True
A__ = completed
else:
# failed to make progress.
A__ = True
self.reset()
return stepped, completed, reset
def UpperCAmelCase_ ( self :Optional[int] )-> List[Any]:
A__ = False
A__ = 0
def UpperCAmelCase_ ( self :int )-> Optional[Any]:
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCAmelCase_ ( self :Optional[Any] , lowercase_ :Dict=False )-> List[Any]:
A__ = PhrasalConstraint(self.token_ids )
if stateful:
A__ = self.seqlen
A__ = self.fulfilled_idx
A__ = self.completed
return new_constraint
class UpperCAmelCase :
def __init__( self :Any , lowercase_ :List[List[int]] , lowercase_ :List[Any]=True )-> Tuple:
A__ = max([len(lowercase_ ) for one in nested_token_ids] )
A__ = {}
for token_ids in nested_token_ids:
A__ = root
for tidx, token_id in enumerate(lowercase_ ):
if token_id not in level:
A__ = {}
A__ = level[token_id]
if no_subsets and self.has_subsets(lowercase_ , lowercase_ ):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
F" {nested_token_ids}." )
A__ = root
def UpperCAmelCase_ ( self :List[str] , lowercase_ :Optional[Any] )-> Any:
A__ = self.trie
for current_token in current_seq:
A__ = start[current_token]
A__ = list(start.keys() )
return next_tokens
def UpperCAmelCase_ ( self :Dict , lowercase_ :Optional[Any] )-> Any:
A__ = self.next_tokens(lowercase_ )
return len(lowercase_ ) == 0
def UpperCAmelCase_ ( self :Any , lowercase_ :Tuple )-> List[str]:
A__ = list(root.values() )
if len(lowercase_ ) == 0:
return 1
else:
return sum([self.count_leaves(lowercase_ ) for nn in next_nodes] )
def UpperCAmelCase_ ( self :List[str] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] )-> Any:
A__ = self.count_leaves(lowercase_ )
return len(lowercase_ ) != leaf_count
class UpperCAmelCase ( UpperCamelCase__ ):
def __init__( self :str , lowercase_ :List[List[int]] )-> List[str]:
super(lowercase_ , self ).__init__()
if not isinstance(lowercase_ , lowercase_ ) or len(lowercase_ ) == 0:
raise ValueError(F"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(lowercase_ , lowercase_ ) for token_ids in nested_token_ids ):
raise ValueError(F"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(lowercase_ , lowercase_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
A__ = DisjunctiveTrie(lowercase_ )
A__ = nested_token_ids
A__ = self.trie.max_height
A__ = []
A__ = False
def UpperCAmelCase_ ( self :Union[str, Any] )-> str:
A__ = self.trie.next_tokens(self.current_seq )
if len(lowercase_ ) == 0:
return None
else:
return token_list
def UpperCAmelCase_ ( self :List[str] , lowercase_ :int )-> Optional[int]:
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowercase_ )}" )
A__ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :int )-> List[Any]:
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowercase_ )}" )
A__ = False
A__ = False
A__ = False
if self.does_advance(lowercase_ ):
self.current_seq.append(lowercase_ )
A__ = True
else:
A__ = True
self.reset()
A__ = self.trie.reached_leaf(self.current_seq )
A__ = completed
return stepped, completed, reset
def UpperCAmelCase_ ( self :str )-> Union[str, Any]:
A__ = False
A__ = []
def UpperCAmelCase_ ( self :List[str] )-> Optional[int]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def UpperCAmelCase_ ( self :Tuple , lowercase_ :Any=False )-> Dict:
A__ = DisjunctiveConstraint(self.token_ids )
if stateful:
A__ = self.seqlen
A__ = self.current_seq
A__ = self.completed
return new_constraint
class UpperCAmelCase :
def __init__( self :int , lowercase_ :List[Constraint] )-> Union[str, Any]:
A__ = constraints
# max # of steps required to fulfill a given constraint
A__ = max([c.seqlen for c in constraints] )
A__ = len(lowercase_ )
A__ = False
self.init_state()
def UpperCAmelCase_ ( self :int )-> Dict:
A__ = []
A__ = None
A__ = [constraint.copy(stateful=lowercase_ ) for constraint in self.constraints]
def UpperCAmelCase_ ( self :Any )-> Union[str, Any]:
A__ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def UpperCAmelCase_ ( self :List[str] )-> Any:
A__ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
A__ = constraint.advance()
if isinstance(lowercase_ , lowercase_ ):
token_list.append(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
token_list.extend(lowercase_ )
else:
A__ = self.inprogress_constraint.advance()
if isinstance(lowercase_ , lowercase_ ):
token_list.append(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
token_list.extend(lowercase_ )
if len(lowercase_ ) == 0:
return None
else:
return token_list
def UpperCAmelCase_ ( self :int , lowercase_ :Optional[List[int]] )-> int:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
A__, A__ = self.add(lowercase_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCAmelCase_ ( self :str , lowercase_ :int )-> Optional[Any]:
if not isinstance(lowercase_ , lowercase_ ):
raise ValueError(F"`token_id` should be an `int`, but is `{token_id}`." )
A__, A__ = False, False
if self.completed:
A__ = True
A__ = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
A__, A__, A__ = self.inprogress_constraint.update(lowercase_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=lowercase_ ) )
A__ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
A__ = None
if len(self.pending_constraints ) == 0:
# we're done!
A__ = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(lowercase_ ):
A__, A__, A__ = pending_constraint.update(lowercase_ )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(lowercase_ )
A__ = None
if not complete and stepped:
A__ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
A__ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
A__ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCAmelCase_ ( self :int , lowercase_ :Union[str, Any]=True )-> Dict:
A__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
A__ = [
constraint.copy(stateful=lowercase_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
A__ = self.inprogress_constraint.copy(stateful=lowercase_ )
A__ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
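# Sketch of how these constraint classes are consumed downstream: generate() accepts
# them through the `constraints` argument together with beam search (model name and
# prompt are illustrative):
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

force_ids = tokenizer("Sie", add_special_tokens=False).input_ids
inputs = tokenizer("translate English to German: How old are you?", return_tensors="pt")
out = model.generate(**inputs, constraints=[PhrasalConstraint(force_ids)], num_beams=4)
print(tokenizer.decode(out[0], skip_special_tokens=True))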
| 123 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k: str) -> str:
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd: dict) -> None:
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str, config_json_path: str) -> None:
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
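# The renaming logic above in miniature, reusing the helper just defined:
sd = {"encoder.attention.q_lin.weight": 1, "embeddings.weight": 2}
renamed = {rename_state_dict_key(k): v for k, v in sd.items()}
print(sorted(renamed))  # ['encoder.self_attn.q_proj.weight', 'shared.weight']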
| 123 | 1 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
assert isinstance(snake_case__ , snake_case__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : Tuple = tmp_path / "cache"
__UpperCamelCase : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCamelCase : Any = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : List[Any] = tmp_path / "cache"
__UpperCamelCase : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__UpperCamelCase : int = features.copy() if features else default_expected_features
__UpperCamelCase : int = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase : int = ParquetDatasetReader(snake_case__ , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : Union[str, Any] = tmp_path / "cache"
__UpperCamelCase : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__UpperCamelCase : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ , split=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
if issubclass(snake_case__ , snake_case__ ):
__UpperCamelCase : Union[str, Any] = parquet_path
elif issubclass(snake_case__ , snake_case__ ):
__UpperCamelCase : Tuple = [parquet_path]
__UpperCamelCase : Optional[int] = tmp_path / "cache"
__UpperCamelCase : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__UpperCamelCase : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_dataset(snake_case__ , snake_case__ )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__=("train",) ):
assert isinstance(snake_case__ , snake_case__ )
for split in splits:
__UpperCamelCase : List[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : Tuple = tmp_path / "cache"
__UpperCamelCase : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCamelCase : Dict = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=snake_case__ , keep_in_memory=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
__UpperCamelCase : Tuple = tmp_path / "cache"
__UpperCamelCase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__UpperCamelCase : Optional[Any] = features.copy() if features else default_expected_features
__UpperCamelCase : Optional[Any] = (
Features({feature: Value(snake_case__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase : str = ParquetDatasetReader({"train": parquet_path} , features=snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __lowerCAmelCase ( snake_case__ , snake_case__ , snake_case__ ):
if split:
__UpperCamelCase : Optional[int] = {split: parquet_path}
else:
__UpperCamelCase : Optional[Any] = "train"
__UpperCamelCase : Tuple = {"train": parquet_path, "test": parquet_path}
__UpperCamelCase : Optional[int] = tmp_path / "cache"
__UpperCamelCase : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
__UpperCamelCase : List[str] = ParquetDatasetReader(snake_case__ , cache_dir=snake_case__ ).read()
_check_parquet_datasetdict(snake_case__ , snake_case__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
__UpperCamelCase : Optional[int] = ParquetDatasetWriter(snake_case__ , tmp_path / "foo.parquet" )
assert writer.write() > 0
__UpperCamelCase : Optional[Any] = pq.ParquetFile(tmp_path / "foo.parquet" )
__UpperCamelCase : Optional[int] = pf.read()
assert dataset.data.table == output_table
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
__UpperCamelCase : Optional[int] = str(shared_datadir / "test_image_rgb.jpg" )
__UpperCamelCase : Any = {"image": [image_path]}
__UpperCamelCase : List[str] = Features({"image": Image()} )
__UpperCamelCase : Optional[int] = Dataset.from_dict(snake_case__ , features=snake_case__ )
__UpperCamelCase : Tuple = ParquetDatasetWriter(snake_case__ , tmp_path / "foo.parquet" )
assert writer.write() > 0
__UpperCamelCase : str = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
__UpperCamelCase : Optional[int] = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=snake_case__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __lowerCAmelCase ( snake_case__ , snake_case__ ):
assert get_writer_batch_size(snake_case__ ) == expected
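# Minimal round trip of the reader/writer these tests exercise (paths illustrative):
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
ds.to_parquet("tmp.parquet")
reloaded = Dataset.from_parquet("tmp.parquet")
assert reloaded.column_names == ["col_1", "col_2"]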
| 298 | import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 338 | 0 |
"""simple docstring"""
def solution() -> int:
    """
    Returns the product a*b*c of the Pythagorean triplet with a + b + c == 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
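# A faster alternative to the quadratic search above, via Euclid's parameterization
# a = m^2 - n^2, b = 2mn, c = m^2 + n^2, so a + b + c = 2m(m + n) must equal 1000:
def solution_euclid(total: int = 1000) -> int:
    for m in range(2, total):
        for n in range(1, m):
            if 2 * m * (m + n) == total:
                a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
                return a * b * c
    raise ValueError("no triplet found")


assert solution_euclid() == 31875000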
| 358 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
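# Usage sketch: instantiating the config above with non-default super-resolution
# settings (values illustrative):
config = Swin2SRConfig(upscale=4, upsampler="pixelshuffle")
print(config.upscale, config.model_type)  # 4 swin2sr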
| 172 | 0 |
"""simple docstring"""
def remove_digit(num: int) -> int:
    """
    Returns the biggest possible result that can be achieved by removing
    one digit from the given number.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
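# Worked example: deleting one digit of 152 can give 52, 12 or 15, so the maximum is 52;
# the sign is discarded via abs():
assert remove_digit(152) == 52
assert remove_digit(-290) == 90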
| 60 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Tuple = BloomTokenizerFast
SCREAMING_SNAKE_CASE_ : str = BloomTokenizerFast
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : int = """tokenizer_file"""
SCREAMING_SNAKE_CASE_ : List[str] = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def lowercase_ ( self : List[Any])-> Dict:
'''simple docstring'''
super().setUp()
__lowerCAmelCase: Optional[Any] = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
tokenizer.save_pretrained(self.tmpdirname)
def lowercase_ ( self : List[Any] , **UpperCamelCase__ : Union[str, Any])-> Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__)
def lowercase_ ( self : Union[str, Any])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: str = self.get_rust_tokenizer()
__lowerCAmelCase: int = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
__lowerCAmelCase: List[str] = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]
__lowerCAmelCase: List[str] = tokenizer.batch_encode_plus(UpperCamelCase__)["input_ids"]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: List[Any] = tokenizer.batch_decode(UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Tuple=6)-> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
__lowerCAmelCase: Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__lowerCAmelCase: Dict = "This is a simple input"
__lowerCAmelCase: str = ["This is a simple input 1", "This is a simple input 2"]
__lowerCAmelCase: int = ("This is a simple input", "This is a pair")
__lowerCAmelCase: Union[str, Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__)
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding")
__lowerCAmelCase: Tuple = None # Hotfixing padding = None
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Simple input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Simple input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" , )
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Pair input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" , )
def lowercase_ ( self : Optional[Any])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Dict = self.get_rust_tokenizer()
__lowerCAmelCase: List[str] = load_dataset("xnli" , "all_languages" , split="test" , streaming=UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = next(iter(UpperCamelCase__))["premise"] # pick up one data
__lowerCAmelCase: Any = list(sample_data.values())
__lowerCAmelCase: int = list(map(tokenizer.encode , UpperCamelCase__))
__lowerCAmelCase: str = [tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__) for x in output_tokens]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
def lowercase_ ( self : Optional[int])-> str:
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
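# Sketch of the encode/decode round trip these tests assert, outside the test harness
# (checkpoint name as used above; requires network access to download the tokenizer):
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tok("The quick brown fox").input_ids
print(tok.decode(ids))  # expected to round-trip to "The quick brown fox"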
| 217 | 0 |
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum and the sum of the squares
    of the first n natural numbers.
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
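# Closed-form check of the loop above: sum = n(n+1)/2 and sum of squares = n(n+1)(2n+1)/6.
def solution_closed_form(n: int = 100) -> int:
    return (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6


assert solution_closed_form() == solution() == 25164150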
| 352 |
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """
    Choose a random pivot for the list.
    """
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """
    Return the kth smallest number in lst (1-indexed), in expected linear time.
    """
    # pick a pivot and separate into lists based on the pivot.
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
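# Usage sketch for the quickselect above: kth_number is 1-indexed.
assert kth_number([2, 1, 3, 4, 5], 3) == 3
assert kth_number([25, 21, 98, 100, 76, 22, 43, 60, 89, 87], 4) == 43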
| 189 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = """gpt_neox_japanese"""
def __init__( self : Optional[int] , a_ : List[str]=3_20_00 , a_ : Tuple=25_60 , a_ : List[Any]=32 , a_ : Tuple=32 , a_ : str=4 , a_ : Dict="gelu" , a_ : Optional[int]=1.0_0 , a_ : int=1_00_00 , a_ : str=20_48 , a_ : str=0.0_2 , a_ : int=1e-5 , a_ : Any=True , a_ : Union[str, Any]=3_19_96 , a_ : Tuple=3_19_99 , a_ : Any=0.1 , a_ : str=0.0 , **a_ : List[Any] , ):
'''simple docstring'''
super().__init__(bos_token_id=a_ , eos_token_id=a_ , **a_ )
__UpperCAmelCase : List[str] = vocab_size
__UpperCAmelCase : str = max_position_embeddings
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_multiple_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Dict = rotary_pct
__UpperCAmelCase : Optional[int] = rotary_emb_base
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = layer_norm_eps
__UpperCAmelCase : Optional[Any] = use_cache
__UpperCAmelCase : Optional[int] = attention_dropout
__UpperCAmelCase : List[Any] = hidden_dropout
| 226 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A =logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    # |a - b|^2 = |a|^2 - 2ab + |b|^2, computed for all pairs with one matmul
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class UpperCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ["""pixel_values"""]
def __init__( self : str , a_ : Optional[Union[List[List[int]], np.ndarray]] = None , a_ : bool = True , a_ : Dict[str, int] = None , a_ : PILImageResampling = PILImageResampling.BILINEAR , a_ : bool = True , a_ : bool = True , **a_ : List[str] , ):
'''simple docstring'''
super().__init__(**a_ )
__UpperCAmelCase : Optional[int] = size if size is not None else {'''height''': 2_56, '''width''': 2_56}
__UpperCAmelCase : List[str] = get_size_dict(a_ )
__UpperCAmelCase : str = np.array(a_ ) if clusters is not None else None
__UpperCAmelCase : Dict = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = resample
__UpperCAmelCase : Tuple = do_normalize
__UpperCAmelCase : Optional[int] = do_color_quantize
def snake_case__ ( self : Optional[Any] , a_ : np.ndarray , a_ : Dict[str, int] , a_ : PILImageResampling = PILImageResampling.BILINEAR , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Dict , ):
'''simple docstring'''
__UpperCAmelCase : Tuple = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
a_ , size=(size['''height'''], size['''width''']) , resample=a_ , data_format=a_ , **a_ )
def snake_case__ ( self : Tuple , a_ : np.ndarray , a_ : Optional[Union[str, ChannelDimension]] = None , ):
'''simple docstring'''
__UpperCAmelCase : Dict = rescale(image=a_ , scale=1 / 1_2_7.5 , data_format=a_ )
__UpperCAmelCase : Union[str, Any] = image - 1
return image
def snake_case__ ( self : int , a_ : ImageInput , a_ : bool = None , a_ : Dict[str, int] = None , a_ : PILImageResampling = None , a_ : bool = None , a_ : Optional[bool] = None , a_ : Optional[Union[List[List[int]], np.ndarray]] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **a_ : Any , ):
'''simple docstring'''
__UpperCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase : List[str] = size if size is not None else self.size
__UpperCAmelCase : Any = get_size_dict(a_ )
__UpperCAmelCase : Optional[int] = resample if resample is not None else self.resample
__UpperCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : int = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__UpperCAmelCase : Optional[int] = clusters if clusters is not None else self.clusters
__UpperCAmelCase : Any = np.array(a_ )
__UpperCAmelCase : Optional[int] = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
__UpperCAmelCase : List[Any] = [to_numpy_array(a_ ) for image in images]
if do_resize:
__UpperCAmelCase : List[str] = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images]
if do_normalize:
__UpperCAmelCase : Dict = [self.normalize(image=a_ ) for image in images]
if do_color_quantize:
__UpperCAmelCase : int = [to_channel_dimension_format(a_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__UpperCAmelCase : List[str] = np.array(a_ )
__UpperCAmelCase : Dict = color_quantize(a_ , a_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__UpperCAmelCase : Any = images.shape[0]
__UpperCAmelCase : Any = images.reshape(a_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__UpperCAmelCase : List[Any] = list(a_ )
else:
__UpperCAmelCase : int = [to_channel_dimension_format(a_ , a_ ) for image in images]
__UpperCAmelCase : int = {'''input_ids''': images}
return BatchFeature(data=a_ , tensor_type=a_ )
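# Tiny numeric check of the vectorized distance trick used by color_quantize above:
# every pixel should map to the index of its nearest cluster.
import numpy as np

pixels = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
clusters = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
print(color_quantize(pixels, clusters))  # [0 1]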
| 226 | 1 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance in metres between two points on Earth,
    using latitudes reduced for the WGS84 flattening.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
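# Usage sketch with two well-known points (San Francisco and Yosemite); the result is
# in metres and should come out near 254 km:
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000:.1f} km")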
| 176 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
return ConvertCommand(
args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name )
lowerCamelCase : Tuple = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( A ) -> List[str]:
snake_case : Union[str, Any] = parser.add_parser(
"""convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
train_parser.add_argument("""--model_type""" , type=A , required=A , help="""Model's type.""" )
train_parser.add_argument(
"""--tf_checkpoint""" , type=A , required=A , help="""TensorFlow checkpoint path or folder.""" )
train_parser.add_argument(
"""--pytorch_dump_output""" , type=A , required=A , help="""Path to the PyTorch saved model output.""" )
train_parser.add_argument("""--config""" , type=A , default="""""" , help="""Configuration file path or folder.""" )
train_parser.add_argument(
"""--finetuning_task_name""" , type=A , default=A , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
train_parser.set_defaults(func=A )
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, "
                "xlnet, xlm, lxmert, rembert]"
            )
| 176 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
UpperCamelCase__ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    """Read a text file into a {line_number: first_word} mapping (used as id2label)."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> int:
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase__ = getattr(UpperCamelCase__, UpperCamelCase__ )
UpperCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCamelCase__ ):
UpperCAmelCase__ = PARAM_MAPPING[full_name.split("." )[-1]]
UpperCAmelCase__ = """param"""
if weight_type is not None and weight_type != "param":
UpperCAmelCase__ = getattr(UpperCamelCase__, UpperCamelCase__ ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase__ = hf_pointer
for attribute in hf_param_name.split("." ):
UpperCAmelCase__ = getattr(UpperCamelCase__, UpperCamelCase__ )
UpperCAmelCase__ = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase__ = value[0]
else:
UpperCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase__ = value
elif weight_type == "weight_g":
UpperCAmelCase__ = value
elif weight_type == "weight_v":
UpperCAmelCase__ = value
elif weight_type == "bias":
UpperCAmelCase__ = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
UpperCAmelCase__ = getattr(UpperCamelCase__, UpperCamelCase__ )
UpperCAmelCase__ = value
else:
UpperCAmelCase__ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCamelCase__ ):
UpperCAmelCase__ = PARAM_MAPPING[full_name.split("." )[-1]]
UpperCAmelCase__ = """param"""
if weight_type is not None and weight_type != "param":
UpperCAmelCase__ = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase__ = """.""".join([key, hf_param_name] )
else:
UpperCAmelCase__ = key
UpperCAmelCase__ = value if """lm_head""" in full_key else value[0]
UpperCamelCase__ = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def lowerCAmelCase_ ( __A, __A, __A=None, __A=None ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase__ = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase__ = True
if "*" in mapped_key:
UpperCAmelCase__ = name.split(UpperCamelCase__ )[0].split("." )[-2]
UpperCAmelCase__ = mapped_key.replace("*", UpperCamelCase__ )
if "weight_g" in name:
UpperCAmelCase__ = """weight_g"""
elif "weight_v" in name:
UpperCAmelCase__ = """weight_v"""
elif "bias" in name:
UpperCAmelCase__ = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase__ = """weight"""
else:
UpperCAmelCase__ = None
if hf_dict is not None:
rename_dict(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
else:
set_recursively(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
    return is_used
def lowerCAmelCase_ ( __A, __A, __A ) -> List[str]:
'''simple docstring'''
UpperCAmelCase__ = []
UpperCAmelCase__ = fairseq_model.state_dict()
UpperCAmelCase__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, hf_model.config.feat_extract_norm == "group", )
UpperCAmelCase__ = True
else:
UpperCAmelCase__ = load_wavaveca_layer(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = full_name.split("conv_layers." )[-1]
UpperCAmelCase__ = name.split("." )
UpperCAmelCase__ = int(items[0] )
UpperCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def lowerCAmelCase_ ( __A, __A, __A=None, __A=None, __A=True, __A=False ) -> Tuple:
'''simple docstring'''
if config_path is not None:
UpperCAmelCase__ = WavaVecaConfig.from_pretrained(UpperCamelCase__ )
else:
UpperCAmelCase__ = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase__ = read_txt_into_dict(UpperCamelCase__ )
UpperCAmelCase__ = idalabel
UpperCAmelCase__ = WavaVecaForSequenceClassification(UpperCamelCase__ )
UpperCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=UpperCamelCase__, return_attention_mask=UpperCamelCase__, )
feature_extractor.save_pretrained(UpperCamelCase__ )
elif is_finetuned:
if dict_path:
UpperCAmelCase__ = Dictionary.load(UpperCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase__ = target_dict.pad_index
UpperCAmelCase__ = target_dict.bos_index
UpperCAmelCase__ = target_dict.eos_index
UpperCAmelCase__ = len(target_dict.symbols )
UpperCAmelCase__ = os.path.join(UpperCamelCase__, "vocab.json" )
if not os.path.isdir(UpperCamelCase__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCamelCase__ ) )
return
os.makedirs(UpperCamelCase__, exist_ok=UpperCamelCase__ )
UpperCAmelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
with open(UpperCamelCase__, "w", encoding="utf-8" ) as vocab_handle:
json.dump(UpperCamelCase__, UpperCamelCase__ )
UpperCAmelCase__ = WavaVecaCTCTokenizer(
UpperCamelCase__, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=UpperCamelCase__, )
UpperCAmelCase__ = True if config.feat_extract_norm == """layer""" else False
UpperCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=UpperCamelCase__, return_attention_mask=UpperCamelCase__, )
UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=UpperCamelCase__, tokenizer=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
UpperCAmelCase__ = WavaVecaForCTC(UpperCamelCase__ )
else:
UpperCAmelCase__ = WavaVecaForPreTraining(UpperCamelCase__ )
if is_finetuned or is_seq_class:
UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
UpperCAmelCase__ = argparse.Namespace(task="audio_pretraining" )
UpperCAmelCase__ = fairseq.tasks.setup_task(UpperCamelCase__ )
UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=UpperCamelCase__ )
UpperCAmelCase__ = model[0].eval()
recursively_load_weights(UpperCamelCase__, UpperCamelCase__, not is_finetuned )
hf_wavavec.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
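
# Illustrative invocation (the script name and paths are assumptions, not
# values from the original file):
#   python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base --not_finetuned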
| 65 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer backed by a simple char-to-id vocabulary file."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
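
# Hedged usage sketch (added for illustration; assumes a local `vocab.json`
# mapping single characters to ids, which is not shipped with this snippet):
# tokenizer = MgpstrTokenizer("vocab.json")
# tokens = tokenizer._tokenize("abc")            # -> ["a", "b", "c"]
# ids = [tokenizer._convert_token_to_id(t) for t in tokens]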
| 294 | 0 |
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
__a: Dict = True
from torch.cuda.amp import autocast
__a: str = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
SCREAMING_SNAKE_CASE = field(
default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
SCREAMING_SNAKE_CASE = field(
default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
SCREAMING_SNAKE_CASE = field(
default=0.1 , metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
} , )
SCREAMING_SNAKE_CASE = field(
default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
SCREAMING_SNAKE_CASE = field(
default=0.05 , metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
} , )
SCREAMING_SNAKE_CASE = field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE = field(
default="train+validation" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE = list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class DataCollatorCTCWithPadding:
    """Dynamically pads the audio inputs and the CTC labels of the received features."""

    processor: WavaVecaProcessor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels

        return batch


class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def __UpperCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowercase__ : Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = WavaVecaCTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = WavaVecaForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")
    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}
    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowercase__ : List[Any] = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
lowercase__ : Optional[Any] = model_args.model_name_or_path
else:
lowercase__ : Union[str, Any] = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
lowercase__ : Tuple = trainer.train(resume_from_checkpoint=UpperCAmelCase )
trainer.save_model()
lowercase__ : Union[str, Any] = train_result.metrics
lowercase__ : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase )
)
lowercase__ : List[str] = min(UpperCAmelCase , len(UpperCAmelCase ) )
trainer.log_metrics('''train''' , UpperCAmelCase )
trainer.save_metrics('''train''' , UpperCAmelCase )
trainer.save_state()
# Evaluation
lowercase__ : List[Any] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__ : Optional[int] = trainer.evaluate()
lowercase__ : Dict = data_args.max_val_samples if data_args.max_val_samples is not None else len(UpperCAmelCase )
lowercase__ : Optional[Any] = min(UpperCAmelCase , len(UpperCAmelCase ) )
trainer.log_metrics('''eval''' , UpperCAmelCase )
trainer.save_metrics('''eval''' , UpperCAmelCase )
return results
if __name__ == "__main__":
main()
| 214 |
'''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a: List[str] = logging.get_logger(__name__)
__a: int = """▁"""
__a: Optional[int] = {"""vocab_file""": """prophetnet.tokenizer"""}
__a: Optional[int] = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
__a: List[str] = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
__a: Tuple = {
"""microsoft/xprophetnet-large-wiki100-cased""": 5_12,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="[UNK]" , __lowerCAmelCase="[PAD]" , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[MASK]" , __lowerCAmelCase = None , **__lowerCAmelCase , ) -> None:
lowercase__ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
lowercase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCAmelCase ) )
lowercase__ : Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowercase__ : str = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
for i in range(10 ):
lowercase__ : Tuple = F"""[unused{i}]"""
lowercase__ : List[Any] = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowercase__ : Optional[Any] = 12
lowercase__ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(__lowerCAmelCase )
def __getstate__( self ) -> Union[str, Any]:
lowercase__ : Dict = self.__dict__.copy()
lowercase__ : Optional[Any] = None
return state
def __setstate__( self , __lowerCAmelCase ) -> Dict:
lowercase__ : Any = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase__ : List[Any] = {}
lowercase__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is None:
return ([0] * len(__lowerCAmelCase )) + [1]
return ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> List[int]:
lowercase__ : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase( self ) -> List[Any]:
return len(self.sp_model ) + self.fairseq_offset
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : str = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase( self , __lowerCAmelCase ) -> str:
return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ : Tuple = self.sp_model.PieceToId(__lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCAmelCase( self , __lowerCAmelCase ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Union[str, Any]:
lowercase__ : Optional[Any] = ''''''.join(__lowerCAmelCase ).replace(__lowerCAmelCase , ''' ''' ).strip()
return out_string
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : str = os.path.join(
__lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , '''wb''' ) as fi:
lowercase__ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowercase__ : str = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 214 | 1 |
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Segment tree over `arr` for any associative combining function `fnc`."""
        any_type: Any | T = None

        self.N: int = len(arr)
        # Leaves live at indices N..2N-1; internal nodes at 1..N-1.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """Fill the internal nodes bottom-up from the leaves."""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set arr[p] = v and refresh the ancestors of that leaf."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Combine the values on the inclusive index range [l, r]."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every range query against a brute-force reduce over the array."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)

    test_all_segments()
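
# Note (added): this is the iterative, bottom-up segment tree. `build` is O(n),
# and both `update` and `query` walk at most one root-to-leaf path, i.e. O(log n),
# for any associative combining function such as min, max, or addition.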
| 326 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : str =JukeboxTokenizer
UpperCAmelCase_ : Tuple ={
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
import torch
__snake_case : List[str] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
__snake_case : Union[str, Any] = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case : Optional[Any] = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
import torch
__snake_case : Optional[Any] = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
__snake_case : Tuple = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case : int = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 326 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 367 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BertGeneration."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : int="<unk>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Tuple="<::::>" , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : Union[str, Any] , ) -> None:
UpperCAmelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
UpperCAmelCase_ : List[str] = vocab_file
UpperCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
UpperCAmelCase_ : List[str] = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> Tuple:
UpperCAmelCase_ : List[str] = self.__dict__.copy()
UpperCAmelCase_ : List[Any] = None
return state
def __setstate__( self : Dict , lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
UpperCAmelCase_ : Any = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ : Any = {}
UpperCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : str ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase_ , out_type=lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Dict:
return self.sp_model.piece_to_id(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : int ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = self.sp_model.IdToPiece(lowerCAmelCase_ )
return token
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Tuple = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase_ ) + token
UpperCAmelCase_ : Tuple = []
else:
current_sub_tokens.append(lowerCAmelCase_ )
out_string += self.sp_model.decode(lowerCAmelCase_ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : Tuple = os.path.join(
lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , "wb" ) as fi:
UpperCAmelCase_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
| 253 | 0 |
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest substring that appears in both input strings."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
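
# Illustrative usage (a sketch added for clarity; the inputs are arbitrary):
def _demo_longest_common_substring() -> None:
    # "abcdef" and "xabded" share "ab" and "de"; the first longest match
    # found by the DP scan is returned.
    print(longest_common_substring("abcdef", "xabded"))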
| 124 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps an image processor and a tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
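
# A minimal usage sketch (checkpoint name assumed, not verified here):
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# inputs = processor(images=pil_image, text=["a photo of a cat"], return_tensors="pt")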
| 124 | 1 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states
class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
| 367 |
def sylvester(number: int) -> int:
    """Return the n-th term of Sylvester's sequence: a(1) = 2 and a(n) = a(n-1)^2 - a(n-1) + 1."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 292 | 0 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """
    Simplest and fastest version of image resizing: each destination pixel copies
    its nearest source pixel.
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination x-coordinate back to the source image."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination y-coordinate back to the source image."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
| 124 |
def binary_recursive(decimal: int) -> str:
    """Return the binary representation of a non-negative integer, computed recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the user input and prefix the result with the sign and '0b'."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
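    # Illustrative check: binary_recursive(10) -> "1010", so main("-10") -> "-0b1010".
    assert main("-10") == "-0b1010"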
| 18 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"]),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"]),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"]),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"]),
        )

    def test_layoutlmv3_integration_test(self):
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
A_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
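        expected_words = A_  # capture the word list above before the same name is rebound to the boxes below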
A_ : Any = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], 
[4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
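        expected_boxes = A_  # capture the box list above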
        # fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 192 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session with a given provider."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
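
# A minimal usage sketch (paths and input names hypothetical):
# model = OnnxRuntimeModel.from_pretrained("./my_onnx_model_dir")
# outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))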
| 192 | 1 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case_ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
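        expected_encoding = snake_case_  # alias for the expected-encoding literal above (kept verbatim)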
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e"
        )
| 69 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
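
# Illustrative note: `GPTJConfig()` with the defaults above reproduces the
# EleutherAI/gpt-j-6B architecture (28 layers, 16 heads, rotary_dim=64).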

class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 320 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
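
# Illustrative note: `VanConfig()` with these defaults mirrors the "van-base" checkpoint configuration.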
| 358 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs):
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("\u3000", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
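
# A minimal usage sketch (checkpoint taken from the pretrained map above):
# tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
# input_ids = tokenizer("こんにちは、世界。")["input_ids"]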
| 152 | 0 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Pass two points to get the vector from them in the form (x, y, z)."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Get the cross product of the two vectors AB and AC."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check if the vector equals (0, 0, 0) after rounding to the given accuracy."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff the cross product of AB and AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
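
# Illustrative check: three points on the line x = y = z are collinear.
# assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))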
| 274 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
| 274 | 1 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads the dataset metadata from the YAML block of a dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads the dataset metadata from a YAML string, rejecting duplicate keys."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 351 |
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    # field names and defaults follow the `datasets` download configuration
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
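
# Illustrative: `DownloadConfig(max_retries=3).copy()` returns an independent deep copy of the config.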
| 313 | 0 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    """Shared assertions for a dataset read back from SQL."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
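# The tests below assume a `sqlite_path` fixture defined elsewhere (e.g. in
# conftest.py). A minimal sketch of what it could look like, inferred from the
# assertions above — the fixture body itself is an assumption:
@pytest.fixture
def sqlite_path_sketch(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
        cur.executemany(
            "INSERT INTO dataset VALUES (?, ?, ?)",
            [("0", 0, 0.0), ("1", 1, 1.0), ("2", 2, 2.0), ("3", 3, 3.0)],
        )
        con.commit()
    return path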
@require_sqlalchemy
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path):
    """Reading should only grow Arrow memory when the dataset is kept in memory."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path):
    """Explicitly requested features should override the default inferred dtypes."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    """Yield every row of the `dataset` table in the given sqlite file."""
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
    """Writing a dataset back to SQL with one process should preserve every row."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
    """Writing with two processes should produce the same rows as the source table."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
    """num_proc=0 is invalid and should raise a ValueError."""
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
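# Standalone usage sketch (illustrative, outside pytest): round-trip a table
# named "dataset" through the reader and writer. The file paths are hypothetical.
# ds = SqlDatasetReader("dataset", "sqlite:///input.sqlite", cache_dir="/tmp/cache").read()
# SqlDatasetWriter(ds, "dataset", "sqlite:///output.sqlite", num_proc=1).write()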
| 339 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = "pytorch_model.bin"  # name of the checkpoint file written by `finetune`
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we are going to fine-tune."""
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )
@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and inference."""
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )
@dataclasses.dataclass
class STTrainingArguments:
    """Training arguments."""
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training iterations to run."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))
    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)
    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
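# Toy sketch (illustrative): the confidence filter above keeps only rows whose
# pseudo-label probability clears `args.confidence_threshold`. On a tiny
# in-memory dataset (the values below are made up):
# toy = datasets.Dataset.from_dict({"prediction": [0, 1], "probability": [0.95, 0.40]})
# kept = toy.filter(lambda example: example["probability"] > 0.5)  # keeps row 0 only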
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run iterative self-training: fine-tune, pseudo-label, and repeat."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
    )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None
    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file
    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."
    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)
    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    data_dir = data_dir_format(0)
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(data_dir, exist_ok=True)
    accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        data_dir = data_dir_format(iteration)
        assert os.path.exists(data_dir)
        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})
        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)
        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)
        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)
        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]
        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()
        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result
            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1
                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
        progress_bar.update(1)
        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
| 81 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 364 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"
    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True
    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 35 | 0 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first letter of `sentence`, leaving the rest unchanged.
    >>> capitalize("hello world")
    'Hello world'
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
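    # Design-note sketch (illustrative): for all-lowercase ASCII input, the
    # mapping-table approach above agrees with the builtin str method.
    assert capitalize("hello world") == "hello world".capitalize() == "Hello world"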
| 20 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
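# Minimal usage sketch (illustrative): constructing the configs above requires no
# model weights, so the declared ONNX dynamic axes can be inspected directly.
# config = T5Config()                      # defaults: d_model=512, num_layers=6
# onnx_config = T5OnnxConfig(config)
# print(onnx_config.inputs)                # dynamic axes for encoder/decoder inputs
# print(onnx_config.default_onnx_opset)    # -> 13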
| 139 | 0 |
"""simple docstring"""
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using the iterative form of Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
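    # Sanity-check sketch (illustrative): Heap's algorithm must produce exactly
    # the n! permutations of its input; compare with itertools for a small case.
    from itertools import permutations
    assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))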
| 68 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 68 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Scrape a Google Images search for `query` and save up to `max_images` results."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode("unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode("unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
try:
_snake_case : int = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
| 284 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single line, padding/truncating it to `max_length`."""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
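# Toy demonstration (illustrative): `trim_batch` drops the padding columns shared
# by every row, shrinking this batch from width 5 to width 3 (pad id 0):
# batch = torch.tensor([[5, 6, 0, 0, 0], [7, 8, 9, 0, 0]])
# trim_batch(batch, pad_token_id=0)  # -> tensor([[5, 6, 0], [7, 8, 9]])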
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__(self):
        return len(self.src_lens)
    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))
def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    """Token-level F1 between a prediction and a reference string."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
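# Worked example (illustrative): "a cat sat" vs "the cat sat" both normalize to
# "cat sat" (articles stripped), so precision = recall = 1 and the F1 is 1.0:
# f1_score("a cat sat", "the cat sat")  # -> 1.0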
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    """Fraction of predictions that exactly match their reference after normalization."""
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 225 | 0 |
'''simple docstring'''
import os
def solution():
    """Find the maximum top-to-bottom path total in triangle.txt via bottom-up DP."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
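    # Tiny worked example (illustrative): the same bottom-up pass on a 3-row
    # triangle accumulates 3 -> 7 -> 4 for a maximum path total of 14.
    toy = [[3], [7, 4], [2, 4, 5]]
    for i in range(1, len(toy)):
        for j in range(len(toy[i])):
            number1 = toy[i - 1][j] if j != len(toy[i - 1]) else 0
            number2 = toy[i - 1][j - 1] if j > 0 else 0
            toy[i][j] += max(number1, number2)
    assert max(toy[-1]) == 14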
| 367 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 5 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])
    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    # two sample images from the fixtures dataset
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)
        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 119 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(CodeGenOnnxConfig, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 119 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload, sampling_rate ):
    '''simple docstring'''
    ar = f"{sampling_rate}"
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile' )
    return audio
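# A minimal usage sketch for ffmpeg_read (the file name is hypothetical; any
# container ffmpeg can decode works, since the raw bytes are piped through ffmpeg):
#
#     with open("sample.mp3", "rb") as f:
#         audio = ffmpeg_read(f.read(), 16_000)
#     audio.dtype, audio.shape  # (dtype('float32'), (num_samples,))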
def ffmpeg_microphone(sampling_rate, chunk_length_s, format_for_conversion = "f32le", ):
    '''simple docstring'''
    ar = f"{sampling_rate}"
    ac = '1'
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
    system = platform.system()
    if system == "Linux":
        format_ = 'alsa'
        input_ = 'default'
    elif system == "Darwin":
        format_ = 'avfoundation'
        input_ = ':0'
    elif system == "Windows":
        format_ = 'dshow'
        input_ = 'default'
    ffmpeg_command = [
        'ffmpeg',
        '-f',
        format_,
        '-i',
        input_,
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-fflags',
        'nobuffer',
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len )
    for item in iterator:
        yield item
def ffmpeg_microphone_live(sampling_rate, chunk_length_s, stream_chunk_s = None, stride_length_s = None, format_for_conversion = "f32le", ):
    '''simple docstring'''
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s, (int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True ):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'], dtype=dtype )
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len, stride, stream = False ):
    '''simple docstring'''
    acc = b''
    (stride_left, stride_right) = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
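# Behaviour sketch for chunk_bytes_iter in non-stream mode, assuming a single
# 10-byte payload with chunk_len=4 and stride=(1, 1): consecutive chunks overlap
# by the stride and the final leftover chunk is flushed at the end.
#
#     list(chunk_bytes_iter(iter([b"0123456789"]), 4, stride=(1, 1)))
#     # [{'raw': b'0123', 'stride': (0, 1)}, {'raw': b'2345', 'stride': (1, 1)},
#     #  {'raw': b'4567', 'stride': (1, 1)}, {'raw': b'6789', 'stride': (1, 1)},
#     #  {'raw': b'89', 'stride': (1, 0)}]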
def _ffmpeg_stream(ffmpeg_command, buflen ):
    '''simple docstring'''
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
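# End-to-end sketch (assumes ffmpeg on PATH and a working default microphone):
#
#     for item in ffmpeg_microphone_live(16_000, chunk_length_s=5.0, stream_chunk_s=1.0):
#         print(item["partial"], item["raw"].shape, item["sampling_rate"])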
| 362 |
from manim import *
class __A ( Scene ):
    def construct( self : Union[str, Any] ):
lowerCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase : List[str] = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = [mem.copy() for i in range(6 )]
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Dict = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : str = Text('CPU' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : int = [mem.copy() for i in range(4 )]
lowerCAmelCase : Union[str, Any] = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = Text('GPU' , font_size=24 )
lowerCAmelCase : Tuple = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : List[str] = Text('Model' , font_size=24 )
lowerCAmelCase : Union[str, Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase_ )
lowerCAmelCase : Any = []
lowerCAmelCase : Dict = []
for i, rect in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : Optional[Any] = fill.copy().set_fill(UpperCAmelCase_ , opacity=0.8 )
target.move_to(UpperCAmelCase_ )
model_arr.append(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase_ )
self.add(*UpperCAmelCase_ , *UpperCAmelCase_ )
lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase : Tuple = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : int = VGroup(*UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Tuple = VGroup(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0 )
lowerCAmelCase : Union[str, Any] = Text('Disk' , font_size=24 )
lowerCAmelCase : Optional[Any] = Group(UpperCAmelCase_ , UpperCAmelCase_ ).arrange(UpperCAmelCase_ , buff=0.5 , aligned_edge=UpperCAmelCase_ )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase : Optional[int] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Dict = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(UpperCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase_ )
lowerCAmelCase : str = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ ) )
lowerCAmelCase : Optional[Any] = Square(0.3 )
input.set_fill(UpperCAmelCase_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase_ , buff=0.5 )
self.play(Write(UpperCAmelCase_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase_ , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase_ ) )
self.play(FadeOut(UpperCAmelCase_ ) )
lowerCAmelCase : List[Any] = Arrow(start=UpperCAmelCase_ , end=UpperCAmelCase_ , color=UpperCAmelCase_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase : int = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) )
lowerCAmelCase : Optional[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase_ ) , Circumscribe(model_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCAmelCase : int = AnimationGroup(
FadeOut(UpperCAmelCase_ , run_time=0.5 ) , MoveToTarget(UpperCAmelCase_ , run_time=0.5 ) , FadeIn(UpperCAmelCase_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase : List[str] = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase_ , **UpperCAmelCase_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase : int = a_c
lowerCAmelCase : Union[str, Any] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase_ ) , FadeOut(UpperCAmelCase_ , run_time=0.5 ) , )
lowerCAmelCase : int = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase_ , run_time=3 ) , MoveToTarget(UpperCAmelCase_ ) )
self.wait()
| 323 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowercase ( PipelineTesterMixin , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = ShapEPipeline
_SCREAMING_SNAKE_CASE = ['prompt']
_SCREAMING_SNAKE_CASE = ['prompt']
_SCREAMING_SNAKE_CASE = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
_SCREAMING_SNAKE_CASE = False
@property
    def text_embedder_hidden_size( self ) -> int:
return 32
@property
    def time_input_dim( self ) -> int:
return 32
@property
    def time_embed_dim( self ) -> int:
return self.time_input_dim * 4
@property
    def renderer_dim( self ) -> int:
return 8
@property
    def dummy_tokenizer( self ):
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer
@property
    def dummy_text_encoder( self ):
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(config )
@property
    def dummy_prior( self ):
torch.manual_seed(0 )
        model_kwargs = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
        model = PriorTransformer(**model_kwargs )
        return model
@property
    def dummy_renderer( self ):
torch.manual_seed(0 )
        model_kwargs = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components( self ):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="""exp""" , num_train_timesteps=1_024 , prediction_type="""sample""" , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            """prior""": prior,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """renderer""": renderer,
            """scheduler""": scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """horse""",
            """generator""": generator,
            """num_inference_steps""": 1,
            """frame_size""": 32,
            """output_type""": """np""",
        }
        return inputs
    def test_shap_e( self ):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device == """cpu"""
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e( self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_np_out.npy""" )
        pipe = ShapEPipeline.from_pretrained("""openai/shap-e""" )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            """a shark""" , generator=generator , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
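# Usage sketch mirroring the slow test above (assumes a GPU and downloaded weights):
#
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#     images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64,
#                   frame_size=64, output_type="np").images[0]
#     images.shape  # (20, 64, 64, 3)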
| 46 |
"""simple docstring"""
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/p022_names.txt" ) as file:
        names = str(file.readlines()[0] )
        names = names.replace("\"" , "" ).split("," )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
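    # Worked example from the problem statement: COLIN scores
    # 3 + 15 + 12 + 9 + 14 = 53 and, at position 938, contributes 938 * 53 = 49714.
    print(sum(ord(letter) - 64 for letter in "COLIN"))  # 53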
| 144 | 0 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
A_ : Optional[int] = data_utils.TransfoXLTokenizer
A_ : Any = data_utils.TransfoXLCorpus
A_ : List[str] = data_utils
A_ : List[Any] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file ) -> None:
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , 'rb' ) as fp:
            corpus = pickle.load(fp , encoding='latin1' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
        print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab' , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F'''Building PyTorch model from configuration: {config}''' )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
        with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
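# Example invocation sketch (script name and paths are hypothetical; the flags
# are the ones declared by the argument parser below):
#
#     python convert_transfo_xl_checkpoint.py \
#         --pytorch_dump_folder_path ./transfo-xl-pt \
#         --tf_checkpoint_path ./tf_ckpt/model.ckpt \
#         --transfo_xl_config_file ./tf_ckpt/config.json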
if __name__ == "__main__":
A_ : Dict = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
A_ : Dict = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 368 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x ):  # picklable for multiprocessing
    return x.sum()
def add_one(i ):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class _lowerCAmelCase( TestCase ):
"""simple docstring"""
def _a ( self ):
UpperCamelCase_: Optional[Any] = {}
UpperCamelCase_: List[str] = []
UpperCamelCase_: Any = 1
UpperCamelCase_: Optional[int] = [1, 2]
UpperCamelCase_: List[str] = {'a': 1, 'b': 2}
UpperCamelCase_: Tuple = {'a': [1, 2], 'b': [3, 4]}
UpperCamelCase_: Optional[int] = {'a': {'1': 1}, 'b': 2}
UpperCamelCase_: Optional[Any] = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
UpperCamelCase_: Tuple = {}
UpperCamelCase_: str = []
UpperCamelCase_: List[Any] = 2
UpperCamelCase_: List[Any] = [2, 3]
UpperCamelCase_: Optional[Any] = {'a': 2, 'b': 3}
UpperCamelCase_: List[str] = {'a': [2, 3], 'b': [4, 5]}
UpperCamelCase_: Any = {'a': {'1': 2}, 'b': 3}
UpperCamelCase_: List[str] = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
UpperCamelCase_: Optional[int] = 2
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
UpperCamelCase_: Tuple = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
UpperCamelCase_: Tuple = {'a': 2, 'b': 0, 'c': 2}
UpperCamelCase_: str = {
'a': np.eye(2 ).astype(_lowerCamelCase ),
'b': np.zeros(3 ).astype(_lowerCamelCase ),
'c': np.ones(2 ).astype(_lowerCamelCase ),
}
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , map_numpy=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCamelCase , _lowerCamelCase , map_numpy=_lowerCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_lowerCamelCase , _lowerCamelCase , map_numpy=_lowerCamelCase , num_proc=_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_lowerCamelCase , _lowerCamelCase , map_numpy=_lowerCamelCase , num_proc=_lowerCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_lowerCamelCase ): # can't pickle a local lambda
map_nested(lambda _lowerCamelCase : x + 1 , _lowerCamelCase , num_proc=_lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Optional[Any] = {'a': 1, 'b': 2}
UpperCamelCase_: Dict = {'a': 3, 'b': 4}
UpperCamelCase_: Optional[int] = {'a': 5, 'b': 6}
UpperCamelCase_: int = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ) , _lowerCamelCase )
def _a ( self ):
class _lowerCAmelCase:
"""simple docstring"""
a : str ='''bar'''
UpperCamelCase_: int = Foo()
self.assertEqual(foo.my_attr , 'bar' )
with temporary_assignment(_lowerCamelCase , 'my_attr' , 'BAR' ):
self.assertEqual(foo.my_attr , 'BAR' )
self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(1_6, 1_6, 1_6),
(1_6, 1_7, 1_6),
(1_7, 1_6, 1_6),
] , )
def test_map_nested_num_proc(iterable_length , num_proc , expected_num_proc ):
    with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
        'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
        data_struct = {F'''{i}''': i for i in range(iterable_length )}
        output = map_nested(lambda x : x + 1_0 , data_struct , num_proc=num_proc , parallel_min_length=1_6 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _lowerCAmelCase( TestCase ):
"""simple docstring"""
@require_tf
def _a ( self ):
import tensorflow as tf
from tensorflow.keras import layers
UpperCamelCase_: Dict = layers.Dense(2 )
def gen_random_output():
UpperCamelCase_: Optional[Any] = tf.random.uniform((1, 3) )
return model(_lowerCamelCase ).numpy()
with temp_seed(4_2 , set_tensorflow=_lowerCamelCase ):
UpperCamelCase_: int = gen_random_output()
with temp_seed(4_2 , set_tensorflow=_lowerCamelCase ):
UpperCamelCase_: List[str] = gen_random_output()
UpperCamelCase_: str = gen_random_output()
np.testing.assert_equal(_lowerCamelCase , _lowerCamelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def _a ( self ):
import torch
def gen_random_output():
UpperCamelCase_: Any = torch.nn.Linear(3 , 2 )
UpperCamelCase_: Optional[Any] = torch.rand(1 , 3 )
return model(_lowerCamelCase ).detach().numpy()
with temp_seed(4_2 , set_pytorch=_lowerCamelCase ):
UpperCamelCase_: Dict = gen_random_output()
with temp_seed(4_2 , set_pytorch=_lowerCamelCase ):
UpperCamelCase_: str = gen_random_output()
UpperCamelCase_: str = gen_random_output()
np.testing.assert_equal(_lowerCamelCase , _lowerCamelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def _a ( self ):
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
UpperCamelCase_: Optional[Any] = gen_random_output()
with temp_seed(4_2 ):
UpperCamelCase_: Tuple = gen_random_output()
UpperCamelCase_: Optional[int] = gen_random_output()
np.testing.assert_equal(_lowerCamelCase , _lowerCamelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('input_data' , [{}] )
def test_nested_data_structure_data(input_data ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data , expected_output ):
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def test_asdict():
    input_ = A(x=1 , y='foobar' )
    expected_output = {'x': 1, 'y': 'foobar'}
    assert asdict(input_ ) == expected_output
    input_ = {'a': {'b': A(x=1_0 , y='foo' )}, 'c': [A(x=2_0 , y='bar' )]}
    expected_output = {'a': {'b': {'x': 1_0, 'y': 'foo'}}, 'c': [{'x': 2_0, 'y': 'bar'}]}
    assert asdict(input_ ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=1_0 , y='foo' )] )
def _split_text(text ):
    return text.split()
def _aseconds_generator_of_aitems_with_timing(content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) )
        assert out.count('hello' ) == 1_0
        assert out.count('there' ) == 1_0
        assert len(out ) == 2_0
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 1_0 ) )
        assert out.count('hello' ) == 1_0
        assert out.count('there' ) == 1_0
        assert len(out ) == 2_0
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ):
            assert yield_time < time.time() + 0.1, "we should see each item directly after it was yielded"
            out.append(content )
        assert out.count('a' ) == 2
        assert out.count('b' ) == 2
        assert len(out ) == 4
| 292 | 0 |
def neville_interpolate(x_points , y_points , x0 ):
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
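    # Hypothetical usage: the five sample points lie on y = x + 5, so Neville's
    # scheme should reproduce the line exactly at x0 = 5.
    value, table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    print(value)  # 10.0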
| 49 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__snake_case :List[str] = '''\
Text data.
Second line of data.'''
__snake_case :Optional[Any] = '''file'''
@pytest.fixture(scope='''session''' )
def zstd_path( tmp_path_factory ):
    path = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with zstd.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file( tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , '''w''' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def test_cached_path_extract( compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / '''cache'''
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
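# Library usage sketch mirroring the test above (archive name is hypothetical):
# with extract_compressed_file=True, cached_path returns the extracted file path.
#
#     download_config = DownloadConfig(cache_dir="./cache", extract_compressed_file=True)
#     extracted = cached_path("data.txt.gz", download_config=download_config)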
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def test_extracted_datasets_path( default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    custom_cache_dir = '''custom_cache'''
    custom_extracted_dir = '''custom_extracted_dir'''
    custom_extracted_path = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        expected = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , custom_extracted_dir )
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local( text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local( tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / '''__missing_file__.txt''' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = '''./__missing_file__.txt'''
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec( tmpfs_file ):
    output_file = get_from_cache(f'tmp://{tmpfs_file}' )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_http_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('''https://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_ftp_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('''ftp://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_fsspec_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('''s3://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('''s3://huggingface.co''' )
| 49 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
__lowercase = CanineTokenizer
__lowercase = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def canine_tokenizer( self ):
"""simple docstring"""
return CanineTokenizer.from_pretrained('google/canine-s' )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
        tokenizer.model_max_length = 10_24
        return tokenizer
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
        tokenizer = self.canine_tokenizer
        src_text = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
        # fmt: off
        expected_src_tokens = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors='pt' )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 39) , batch.input_ids.shape )
        self.assertEqual((2, 39) , batch.attention_mask.shape )
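    # Background sketch: CANINE tokenizes at the character level, so the ids above
    # are plain Unicode code points wrapped in [CLS]=57344 and [SEP]=57345, e.g.
    #
    #     tokenizer("hi").input_ids  # [57344, 104, 105, 57345]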
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.canine_tokenizer
_snake_case = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.']
_snake_case = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('input_ids' , lowerCAmelCase_ )
self.assertIn('attention_mask' , lowerCAmelCase_ )
self.assertIn('token_type_ids' , lowerCAmelCase_ )
@require_torch
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.canine_tokenizer
_snake_case = [
'What\'s the weater?',
'It\'s about 25 degrees.',
]
_snake_case = tokenizer(
text_target=lowerCAmelCase_ , max_length=32 , padding='max_length' , truncation=lowerCAmelCase_ , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_snake_case = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
shutil.rmtree(lowerCAmelCase_ )
_snake_case = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_snake_case = tempfile.mkdtemp()
_snake_case = ' He is very happy, UNwant\u00E9d,running'
_snake_case = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
_snake_case = chr(0XE_0_0_7 )
additional_special_tokens.append(lowerCAmelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase_ )
_snake_case = after_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertIn(lowerCAmelCase_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_snake_case , _snake_case = self.get_clean_sequence(lowerCAmelCase_ )
# a special token for Canine can be defined as follows:
_snake_case = 0XE_0_0_5
_snake_case = chr(lowerCAmelCase_ )
tokenizer.add_special_tokens({'cls_token': special_token} )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
_snake_case = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , input_encoded + special_token_id )
_snake_case = tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
self.assertTrue(special_token not in decoded )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_snake_case = chr(0XE_0_0_5 )
_snake_case = chr(0XE_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCAmelCase_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} )
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
self.assertEqual(len(lowerCAmelCase_ ) , 1 )
self.assertEqual(token_a[0] , lowerCAmelCase_ )
self.assertEqual(token_a[0] , lowerCAmelCase_ )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
_snake_case = 0XE_0_0_6
_snake_case = chr(lowerCAmelCase_ )
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ )
tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCAmelCase_ )
tokenizer.from_pretrained(lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_snake_case = json.load(lowerCAmelCase_ )
# a special token for Canine can be defined as follows:
_snake_case = 0XE_0_0_6
_snake_case = chr(lowerCAmelCase_ )
_snake_case = [new_token_a]
_snake_case = [new_token_a]
with open(os.path.join(lowerCAmelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
with open(os.path.join(lowerCAmelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_snake_case = tokenizer_class.from_pretrained(lowerCAmelCase_ , extra_ids=0 )
self.assertIn(lowerCAmelCase_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
_snake_case = 0XE_0_0_7
_snake_case = chr(lowerCAmelCase_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_snake_case = [AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ )]
_snake_case = tokenizer_class.from_pretrained(
lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , extra_ids=0 )
self.assertIn(lowerCAmelCase_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_snake_case = 'hello world'
if self.space_between_special_tokens:
_snake_case = '[CLS] hello world [SEP]'
else:
_snake_case = input
_snake_case = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_snake_case = tokenizer.decode(lowerCAmelCase_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCAmelCase_ , [output, output.lower()] )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_snake_case = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_snake_case = 'a'
_snake_case = ord(lowerCAmelCase_ )
for attr in attributes_list:
setattr(lowerCAmelCase_ , attr + '_id' , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + '_id' ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , attr + '_id' , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(getattr(lowerCAmelCase_ , attr + '_id' ) , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens_ids' ) , [] )
_snake_case = 0XE_0_0_6
_snake_case = chr(lowerCAmelCase_ )
setattr(lowerCAmelCase_ , 'additional_special_tokens_ids' , [additional_special_token_id] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens' ) , [additional_special_token] )
self.assertListEqual(getattr(lowerCAmelCase_ , 'additional_special_tokens_ids' ) , [additional_special_token_id] )
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
def lowerCamelCase ( self ):
"""simple docstring"""
pass
| 160 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list ) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
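# Quick sanity sketch for the helper above:
#
#     check_same_shape([torch.ones(1, 3), torch.zeros(1, 3)])  # True
#     check_same_shape([torch.ones(1, 3), torch.zeros(2, 3)])  # False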
class __UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
__lowercase = StableDiffusionLatentUpscalePipeline
__lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""height""",
"""width""",
"""cross_attention_kwargs""",
"""negative_prompt_embeds""",
"""prompt_embeds""",
}
__lowercase = PipelineTesterMixin.required_optional_params - {"""num_images_per_prompt"""}
__lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowercase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowercase = frozenset([] )
__lowercase = True
@property
    def dummy_image( self ):
"""simple docstring"""
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
def lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_snake_case = UNetaDConditionModel(
act_fn='gelu' , attention_head_dim=8 , norm_num_groups=lowerCAmelCase_ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) , in_channels=8 , mid_block_type=lowerCAmelCase_ , only_cross_attention=lowerCAmelCase_ , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
_snake_case = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
_snake_case = EulerDiscreteScheduler(prediction_type='sample' )
_snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='quick_gelu' , projection_dim=5_12 , )
_snake_case = CLIPTextModel(lowerCAmelCase_ )
_snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_snake_case = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ):
"""simple docstring"""
if str(lowerCAmelCase_ ).startswith('mps' ):
_snake_case = torch.manual_seed(lowerCAmelCase_ )
else:
_snake_case = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_snake_case = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7E-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3E-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7E-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3E-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3E-3)

    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            'DDIMScheduler',
            'DDPMScheduler',
            'PNDMScheduler',
            'HeunDiscreteScheduler',
            'EulerAncestralDiscreteScheduler',
            'KDPM2DiscreteScheduler',
            'KDPM2AncestralDiscreteScheduler',
            'DPMSolverSDEScheduler',
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # not all schedulers are compatible with this pipeline; skip the unsupported ones
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', torch_dtype=torch.float16)
        pipe.to('cuda')

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.float16)
        upscaler.to('cuda')

        prompt = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'

        low_res_latents = pipe(prompt, generator=generator, output_type='latent').images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type='np',
        ).images[0]

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy')
        assert np.abs((expected_image - image).mean()) < 5E-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.float16)
        upscaler.to('cuda')

        prompt = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'

        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png')

        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type='np',
        ).images[0]

        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy')
        assert np.abs((expected_image - upscaled_image).max()) < 5E-2
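
    # The two-stage handoff exercised by the tests above, as a minimal standalone
    # sketch (same model ids as the tests; the prompt is an arbitrary example):
    #
    #   pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', torch_dtype=torch.float16).to('cuda')
    #   upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained('stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.float16).to('cuda')
    #   latents = pipe('a photo of a cat', output_type='latent').images
    #   image = upscaler(prompt='a photo of a cat', image=latents, num_inference_steps=20, guidance_scale=0).images[0]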
| 160 | 1 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_UpperCAmelCase = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
_UpperCAmelCase = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
_UpperCAmelCase = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
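
    # A minimal sketch of what one scorer call above computes, bypassing the
    # datasets wrapper (same `rouge_score` dependency; strings are arbitrary examples):
    #
    #   from rouge_score import rouge_scorer
    #   scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1"])
    #   scorer.score("hello there", "hello there")["rouge1"].fmeasure  # -> 1.0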
| 140 |
"""simple docstring"""
def solution(n: int = 1_000) -> int:
    """
    For each a in [3, n], add 2 * a * ((a - 1) // 2), the closed form of the
    maximum remainder of (a - 1)**k + (a + 1)**k modulo a**2 (Project Euler
    "square remainders" style sum).
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
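
# Quick sanity check of the formula (worked by hand): for n = 10 the terms are
# 6, 8, 20, 24, 42, 48, 72, 80, so solution(10) == 300.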
if __name__ == "__main__":
print(solution())
| 153 | 0 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 254 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    # fill in default masks for anything the caller did not provide
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict['input_ids']
        attention_mask = inputs_dict['attention_mask']
        head_mask = inputs_dict['head_mask']

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)['last_hidden_state']
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            'last_hidden_state'
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict['input_ids'], attention_mask=inputs_dict['attention_mask'])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1E-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict['decoder_input_ids'],
            attention_mask=inputs_dict['decoder_attention_mask'],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict['attention_mask'],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1E-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """conversational""": MaMaaaForConditionalGeneration,
            """feature-extraction""": MaMaaaModel,
            """summarization""": MaMaaaForConditionalGeneration,
            """text2text-generation""": MaMaaaForConditionalGeneration,
            """translation""": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info['missing_keys'], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs['input_ids']
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs['input_ids']
                decoder_input_ids = inputs.get('decoder_input_ids', encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop('decoder_input_ids', None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs['inputs_embeds'] = wte(input_ids)
            else:
                inputs['inputs_embeds'] = wte(encoder_input_ids)
                inputs['decoder_inputs_embeds'] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M')

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained('facebook/m2m100_418M').to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device)
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M').to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device)
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M').to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M', src_lang='fr', tgt_lang='en')

        src_fr = [
            'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
            'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
            'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
            ' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
            ' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors='pt')

        hypotheses_batch = model.generate(
            input_ids=dct['input_ids'].to(torch_device),
            attention_mask=dct['attention_mask'].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id('en'),
        )

        expected_en = [
            'The NSA case highlights the total absence of intelligence debate',
            'I think there are two levels of response from the French government.',
            'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
            ' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
            ' communications in France.',
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True)
        assert generated == expected_en
| 254 | 1 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem by flocking this file"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = F'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group("""nccl""")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(F'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(F'''{gpu} is broken''')
raise
| 293 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
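
# How the lazy-import pattern above works (a brief note, not part of the original
# file): at import time only `_import_structure` is registered; `_LazyModule`
# replaces this module in `sys.modules`, so the heavy torch/tokenizers imports
# only run on first attribute access, e.g. `transformers.models.bloom.BloomModel`.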
| 131 | 0 |
"""simple docstring"""
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # a cell can be visited if it lies inside the grid, is land, and is unvisited
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding the nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
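
# A quick usage sketch (hypothetical grid; connectivity is 8-directional, so
# diagonal neighbours belong to the same island):
#
#   grid = [
#       [1, 1, 0, 0, 0],
#       [0, 1, 0, 0, 1],
#       [1, 0, 0, 1, 1],
#       [0, 0, 0, 0, 0],
#       [1, 0, 1, 0, 1],
#   ]
#   Graph(5, 5, grid).count_islands()  # -> 5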
| 364 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 159 | 0 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
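
# Intent in one line (hypothetical inputs): full-line comments and blank lines
# do not affect the cache hash:
#
#   _hash_python_lines(["x = 1", "# a comment", "", "y = 2"]) == _hash_python_lines(["x = 1", "y = 2"])  # -> True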
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 31 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
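
# A note on the math above (a sketch, not part of the original module): with prime
# p and primitive root e_1, the stored public key is (key_size, e_1, e_2, p) where
# e_2 = (e_1 ** d) ** -1 mod p and the private key keeps the exponent d. An
# encryption scheme built on this variant would send (e_1 ** k mod p, m * e_2 ** k mod p)
# for a random k, and the holder of d recovers m as c1 ** d * c2 mod p.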
| 121 | 0 |
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'nezha'

    def __init__(
        self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02,
        layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3,
        use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = WavaVecaPhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
# check adding a single token
tokenizer.add_tokens('xxx' )
UpperCamelCase = tokenizer('m xxx ɪ' , do_phonemize=A_ ).input_ids
self.assertEqual(A_ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] )
UpperCamelCase = tokenizer('m aaa ɪ ccc' , do_phonemize=A_ ).input_ids
self.assertEqual(A_ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
UpperCamelCase = tokenizer('maɪ c' , do_phonemize=A_ ).input_ids
self.assertEqual(A_ , [3, 200] ) # mai should be <unk> (=3)
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCamelCase = 'Hello how are you'
UpperCamelCase = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
self.assertEqual(A_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCamelCase = 'Hello how are you'
UpperCamelCase = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(A_ ).input_ids , tokenizer(A_ , do_phonemize=A_ ).input_ids )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCamelCase = 'Hello how are you'
UpperCamelCase = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
UpperCamelCase = tokenizer.decode(tokenizer(A_ ).input_ids )
self.assertEqual(A_ , A_ )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCamelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
UpperCamelCase = tokenizer.decode(sample_ids[0] )
UpperCamelCase = tokenizer.batch_decode(A_ )
self.assertEqual(A_ , batch_tokens[0] )
self.assertEqual(A_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCamelCase = 'Hello how are you'
UpperCamelCase = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
self.assertEqual(A_ , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCamelCase = 'Hello how are you'
UpperCamelCase = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
self.assertEqual(tokenizer(A_ ).input_ids , tokenizer(A_ , do_phonemize=A_ ).input_ids )
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
UpperCamelCase = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
UpperCamelCase = tokenizer.decode(sample_ids[0] )
UpperCamelCase = tokenizer.batch_decode(A_ )
self.assertEqual(A_ , batch_tokens[0] )
self.assertEqual(A_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] )
# decode with no word_del_token filter
UpperCamelCase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=A_ )
UpperCamelCase = tokenizer.batch_decode(A_ , filter_word_delimiter_token=A_ )
self.assertEqual(A_ , batch_tokens[0] )
self.assertEqual(A_ , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
UpperCamelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCamelCase = 'Hello how are you'
UpperCamelCase = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
UpperCamelCase = tokenizer.decode(tokenizer(A_ ).input_ids , filter_word_delimiter_token=A_ )
self.assertEqual(A_ , A_ )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
UpperCamelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' )
tokenizer.add_tokens('|' )
UpperCamelCase = 'Hello how are you'
UpperCamelCase = tokenizer.phonemize(A_ , phonemizer_lang='en-us' )
UpperCamelCase = tokenizer.decode(tokenizer(A_ ).input_ids , filter_word_delimiter_token=A_ )
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , A_ )
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
UpperCamelCase = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=A_ )
UpperCamelCase = 'Hello how are you'
UpperCamelCase = tokenizer(A_ , phonemizer_lang='en-us' ).input_ids
UpperCamelCase = tokenizer(A_ , phonemizer_lang='fr-fr' ).input_ids
self.assertNotEqual(A_ , A_ )
UpperCamelCase = tokenizer.decode(A_ )
UpperCamelCase = tokenizer.decode(A_ )
self.assertEqual(A_ , 'h ə l oʊ h aʊ ɑːɹ j uː' )
self.assertEqual(A_ , 'ɛ l o h aʊ a ʁ j u' )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
UpperCamelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
UpperCamelCase = 'Hello how Are you'
UpperCamelCase = 'hello how are you'
UpperCamelCase = tokenizer(A_ ).input_ids
UpperCamelCase = tokenizer(A_ ).input_ids
self.assertEqual(A_ , A_ )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' )
tokenizer.add_tokens(['!', '?'] )
tokenizer.add_special_tokens({'cls_token': '$$$'} )
# fmt: off
UpperCamelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
UpperCamelCase = tokenizer.batch_decode(A_ )
self.assertEqual(A_ , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = self.get_tokenizer(word_delimiter_token='|' )
tokenizer.add_tokens('|' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
UpperCamelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
UpperCamelCase = tokenizer.decode(A_ , output_char_offsets=A_ , filter_word_delimiter_token=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('text' in outputs )
self.assertTrue('char_offsets' in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
UpperCamelCase = self.get_tokenizer(word_delimiter_token='|' )
def check_list_tuples_equal(A_ , A_ ):
self.assertTrue(isinstance(A_ , A_ ) )
self.assertTrue(isinstance(outputs_list[0] , A_ ) )
# transform list to ModelOutput
UpperCamelCase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] )
def recursive_check(A_ , A_ ):
if isinstance(A_ , A_ ):
[recursive_check(A_ , A_ ) for la, la in zip(A_ , A_ )]
self.assertEqual(A_ , A_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] )
# fmt: off
UpperCamelCase = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
UpperCamelCase = tokenizer.batch_decode(A_ , output_char_offsets=A_ )
UpperCamelCase = [tokenizer.decode(A_ , output_char_offsets=A_ ) for ids in sample_ids]
check_list_tuples_equal(A_ , A_ )
@unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
pass
@unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' )
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
pass
@unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
pass
@unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
UpperCamelCase = tokenizer.vocab_size
UpperCamelCase = len(A_ )
self.assertNotEqual(A_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCamelCase = ['aaaaa bbbbbb', 'cccccccccdddddddd']
UpperCamelCase = tokenizer.add_tokens(A_ )
UpperCamelCase = tokenizer.vocab_size
UpperCamelCase = len(A_ )
self.assertNotEqual(A_ , 0 )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , len(A_ ) )
self.assertEqual(A_ , all_size + len(A_ ) )
UpperCamelCase = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=A_ )
self.assertGreaterEqual(len(A_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
UpperCamelCase = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
UpperCamelCase = tokenizer.add_special_tokens(A_ )
UpperCamelCase = tokenizer.vocab_size
UpperCamelCase = len(A_ )
self.assertNotEqual(A_ , 0 )
self.assertEqual(A_ , A_ )
self.assertEqual(A_ , len(A_ ) )
self.assertEqual(A_ , all_size_a + len(A_ ) )
UpperCamelCase = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=A_ )
self.assertGreaterEqual(len(A_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
pass
@unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' )
def UpperCAmelCase_ ( self )-> Union[str, Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = self.get_tokenizers(fast=A_ , do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
UpperCamelCase = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
UpperCamelCase = tokenizer.convert_tokens_to_string(A_ )
self.assertIsInstance(output['text'] , A_ )
| 251 | 1 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        # embed the query batch and the flattened support batch with the same encoder
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
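
# A minimal sketch of the calling convention (field names taken from the forward
# pass above; batch shapes are illustrative assumptions):
#
#   W_query    - tokenizer output for the query sentences (input_ids, attention_mask, ...)
#   W_supports - tokenizer output for all support sentences stacked along the batch
#                dimension, plus "sizes" (supports per query) and the special
#                "start_token_id"/"end_token_id" marker ids
#
#   p_starts, p_ends = model(W_query, W_supports)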
| 53 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
lowercase_ : List[Any] = state.common.alphas_cumprod[t]
lowercase_ : str = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase_ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase_ : str = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase_ : int = jnp.clip(__SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase_ : List[str] = jnp.log(jnp.clip(__SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase_ : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase_ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase_ : Optional[Any] = variance
lowercase_ : Union[str, Any] = state.common.betas[t]
lowercase_ : Union[str, Any] = (predicted_variance + 1) / 2
lowercase_ : Any = frac * max_log + (1 - frac) * min_log
return variance
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , ):
"""simple docstring"""
lowercase_ : Optional[int] = timestep
if key is None:
lowercase_ : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            lowercase_ , lowercase_ = jnp.split(__SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase_ : int = None
# 1. compute alphas, betas
lowercase_ : Any = state.common.alphas_cumprod[t]
lowercase_ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase_ : int = 1 - alpha_prod_t
lowercase_ : str = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase_ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase_ : Any = model_output
elif self.config.prediction_type == "v_prediction":
lowercase_ : List[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
                ''' or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase_ : Optional[Any] = jnp.clip(__SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase_ : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase_ : Optional[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase_ : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase_ : str = jax.random.split(__SCREAMING_SNAKE_CASE , num=1 )
lowercase_ : List[Any] = jax.random.normal(__SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , predicted_variance=__SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase_ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase_ : Any = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__SCREAMING_SNAKE_CASE , state=__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return add_noise_common(state.common , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
return get_velocity_common(state.common , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
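# Worked example (illustration only, not part of the scheduler): the two
# coefficients computed in steps 4-5 above come from Eq. (7) of
# https://arxiv.org/pdf/2006.11239.pdf, evaluated here on a toy linear beta
# schedule to show how they define the DDPM posterior mean.
import jax.numpy as jnp

betas = jnp.linspace(1e-4, 0.02, 10)
alphas = 1.0 - betas
alphas_cumprod = jnp.cumprod(alphas)
t = 5
alpha_prod_t = alphas_cumprod[t]
alpha_prod_t_prev = alphas_cumprod[t - 1]
beta_prod_t = 1 - alpha_prod_t
coeff_x0 = (alpha_prod_t_prev ** 0.5 * betas[t]) / beta_prod_t       # weight on "predicted x_0"
coeff_xt = alphas[t] ** 0.5 * (1 - alpha_prod_t_prev) / beta_prod_t  # weight on current x_t
print(float(coeff_x0), float(coeff_xt))  # posterior mean = coeff_x0 * x_0 + coeff_xt * x_t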
| 93 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Any = logging.get_logger(__name__)
__snake_case : Tuple = {}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'llama'
__snake_case = ['past_key_values']
def __init__( self : Union[str, Any] , lowerCAmelCase_ : Any=3_20_00 , lowerCAmelCase_ : Optional[Any]=40_96 , lowerCAmelCase_ : Any=1_10_08 , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[int]="silu" , lowerCAmelCase_ : Optional[int]=20_48 , lowerCAmelCase_ : Union[str, Any]=0.02 , lowerCAmelCase_ : str=1e-6 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Dict=0 , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : Tuple , ) -> Dict:
'''simple docstring'''
A__ : Dict =vocab_size
A__ : List[str] =max_position_embeddings
A__ : Any =hidden_size
A__ : List[str] =intermediate_size
A__ : List[Any] =num_hidden_layers
A__ : Optional[int] =num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
A__ : Optional[Any] =num_attention_heads
A__ : Dict =num_key_value_heads
A__ : Tuple =hidden_act
A__ : Optional[int] =initializer_range
A__ : Optional[int] =rms_norm_eps
A__ : Optional[Any] =pretraining_tp
A__ : int =use_cache
A__ : Any =rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , tie_word_embeddings=lowerCAmelCase_ , **lowerCAmelCase_ , )
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowerCAmelCase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f"got {self.rope_scaling}" )
A__ : Optional[Any] =self.rope_scaling.get("""type""" , lowerCAmelCase_ )
A__ : Dict =self.rope_scaling.get("""factor""" , lowerCAmelCase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
| 136 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = ['image_processor', 'tokenizer']
__snake_case = 'FlavaImageProcessor'
__snake_case = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : Optional[int] , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
A__ : Any =None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowerCAmelCase_ , )
A__ : Optional[Any] =kwargs.pop("""feature_extractor""" )
A__ : List[Any] =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : int =self.image_processor
def __call__( self : Union[str, Any] , lowerCAmelCase_ : Optional[ImageInput] = None , lowerCAmelCase_ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase_ : Union[bool, str, TruncationStrategy] = False , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase_ : Optional[int] , ) -> str:
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
A__ : int =self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
if images is not None:
A__ : List[str] =self.image_processor(
lowerCAmelCase_ , return_image_mask=lowerCAmelCase_ , return_codebook_pixels=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
if text is not None and images is not None:
encoding.update(lowerCAmelCase_ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase_ ) , tensor_type=lowerCAmelCase_ )
def lowercase__ ( self : Any , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Optional[int] ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase__ ( self : List[Any] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Any ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
A__ : Any =self.tokenizer.model_input_names
A__ : Optional[Any] =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : Tuple ) -> int:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCAmelCase_ , )
return self.image_processor_class
@property
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCAmelCase_ , )
return self.image_processor
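# Usage sketch (assuming this class corresponds to `transformers.FlavaProcessor`;
# the checkpoint name is the released FLAVA checkpoint): the processor routes
# text through the tokenizer and images through the image processor, then
# merges both encodings as in `__call__` above.
import numpy as np
from PIL import Image
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_values', ...]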
| 136 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=2 , UpperCamelCase__=24 , UpperCamelCase__=16 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.02 , UpperCamelCase__=None , UpperCamelCase__=2 , UpperCamelCase__=2 , ) -> List[Any]:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Any = batch_size
lowerCamelCase : List[Any] = patch_size
lowerCamelCase : List[str] = max_length
lowerCamelCase : Optional[Any] = num_mel_bins
lowerCamelCase : Optional[int] = is_training
lowerCamelCase : Optional[int] = use_labels
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Union[str, Any] = num_hidden_layers
lowerCamelCase : str = num_attention_heads
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : int = type_sequence_label_size
lowerCamelCase : int = initializer_range
lowerCamelCase : Union[str, Any] = scope
lowerCamelCase : str = frequency_stride
lowerCamelCase : Union[str, Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCamelCase : Optional[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
lowerCamelCase : Union[str, Any] = (self.max_length - self.patch_size) // self.time_stride + 1
lowerCamelCase : str = frequency_out_dimension * time_out_dimension
lowerCamelCase : str = num_patches + 2
def _lowercase ( self ) -> List[str]:
lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
lowerCamelCase : List[Any] = None
if self.use_labels:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : int = self.get_config()
return config, input_values, labels
def _lowercase ( self ) -> Optional[Any]:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
lowerCamelCase : Union[str, Any] = ASTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : int = self.prepare_config_and_inputs()
        lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
lowerCamelCase : int = {"input_values": input_values}
return config, inputs_dict
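# Worked example of the sequence-length arithmetic used by the tester above:
# the spectrogram is cut into patches along the frequency and time axes, and
# two special tokens ([CLS] and distillation) are prepended.
def ast_sequence_length(num_mel_bins, max_length, patch_size, frequency_stride, time_stride):
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return frequency_out * time_out + 2

# With the defaults of the released AST checkpoints (128 mel bins, 1024 frames,
# patch size 16, strides 10/10) this yields 12 * 101 + 2 = 1214 positions.
assert ast_sequence_length(128, 1024, 16, 10, 10) == 1214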
@require_torch
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Tuple = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase_ : Optional[int] = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : List[Any] = False
lowerCamelCase_ : Any = False
lowerCamelCase_ : List[Any] = False
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _lowercase ( self ) -> int:
lowerCamelCase : List[Any] = ASTModelTester(self )
lowerCamelCase : Any = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def _lowercase ( self ) -> List[Any]:
pass
def _lowercase ( self ) -> str:
        lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _lowercase ( self ) -> Dict:
        lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Dict = model_class(UpperCamelCase__ )
lowerCamelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : int = [*signature.parameters.keys()]
lowerCamelCase : Union[str, Any] = ["input_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@slow
def _lowercase ( self ) -> str:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ASTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def A ( ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" ,filename="sample_audio.flac" ,repo_type="dataset" )
    lowerCamelCase , lowerCamelCase = torchaudio.load(_SCREAMING_SNAKE_CASE )
return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ) -> List[Any]:
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : List[Any] = self.default_feature_extractor
lowerCamelCase : Dict = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(UpperCamelCase__ )
lowerCamelCase : Dict = self.default_feature_extractor
        lowerCamelCase , lowerCamelCase = prepare_audio()
lowerCamelCase : Union[str, Any] = audio.squeeze().numpy()
lowerCamelCase : str = feature_extractor(UpperCamelCase__ , sampling_rate=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase : List[str] = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase : List[Any] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 48 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
SCREAMING_SNAKE_CASE__ : Any = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def A ( _SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : int = EfficientNetConfig()
lowerCamelCase : List[str] = CONFIG_MAP[model_name]["hidden_dim"]
lowerCamelCase : List[str] = CONFIG_MAP[model_name]["width_coef"]
lowerCamelCase : Any = CONFIG_MAP[model_name]["depth_coef"]
lowerCamelCase : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : Optional[int] = CONFIG_MAP[model_name]["dropout_rate"]
lowerCamelCase : str = CONFIG_MAP[model_name]["dw_padding"]
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : Any = 1000
lowerCamelCase : Any = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
    lowerCamelCase : List[str] = {int(k): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : Any = {v: k for k, v in idalabel.items()}
return config
def A ( ) -> int:
lowerCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def A ( _SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : List[Any] = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : str = EfficientNetImageProcessor(
size={"height": size, "width": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47853944, 0.4732864, 0.47434163] ,do_center_crop=_SCREAMING_SNAKE_CASE ,)
return preprocessor
def A ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowerCamelCase : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
lowerCamelCase : Any = sorted(set(_SCREAMING_SNAKE_CASE ) )
lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE )
    lowerCamelCase : List[Any] = {b: str(i) for b, i in zip(_SCREAMING_SNAKE_CASE ,range(_SCREAMING_SNAKE_CASE ) )}
lowerCamelCase : List[Any] = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
lowerCamelCase : Dict = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
lowerCamelCase : Optional[int] = {}
for item in rename_keys:
if item[0] in original_param_names:
lowerCamelCase : List[str] = "efficientnet." + item[1]
lowerCamelCase : int = "classifier.weight"
lowerCamelCase : Union[str, Any] = "classifier.bias"
return key_mapping
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
for key, value in tf_params.items():
if "normalization" in key:
continue
lowerCamelCase : Tuple = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowerCamelCase : List[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 ,2 ,0 ,1 )
elif "depthwise_kernel" in key:
lowerCamelCase : int = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 ,3 ,0 ,1 )
elif "kernel" in key:
lowerCamelCase : List[str] = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) )
else:
lowerCamelCase : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE )
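# Illustration of the permutes above (not part of the conversion): TensorFlow
# stores standard conv kernels as (H, W, in_channels, out_channels) and
# depthwise kernels as (H, W, in_channels, channel_multiplier), while PyTorch
# expects (out_channels, in_channels, H, W).
import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)       # HWIO layout
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # -> OIHW layout
assert tuple(pt_kernel.shape) == (32, 16, 3, 3)

tf_dw_kernel = np.zeros((3, 3, 16, 1), dtype=np.float32)     # depthwise layout
pt_dw_kernel = torch.from_numpy(tf_dw_kernel).permute(2, 3, 0, 1)
assert tuple(pt_dw_kernel.shape) == (16, 1, 3, 3)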
@torch.no_grad()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowerCamelCase : Optional[int] = model_classes[model_name](
include_top=_SCREAMING_SNAKE_CASE ,weights="imagenet" ,input_tensor=_SCREAMING_SNAKE_CASE ,input_shape=_SCREAMING_SNAKE_CASE ,pooling=_SCREAMING_SNAKE_CASE ,classes=1000 ,classifier_activation="softmax" ,)
lowerCamelCase : List[Any] = original_model.trainable_variables
lowerCamelCase : Tuple = original_model.non_trainable_variables
lowerCamelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowerCamelCase : List[str] = param.numpy()
lowerCamelCase : int = list(tf_params.keys() )
# Load HuggingFace model
lowerCamelCase : Union[str, Any] = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
lowerCamelCase : Tuple = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
lowerCamelCase : Union[str, Any] = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowerCamelCase : int = convert_image_processor(_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = preprocessor(images=prepare_img() ,return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowerCamelCase : Optional[Any] = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = outputs.logits.detach().numpy()
# Original model inference
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Any = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : Optional[int] = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST )
lowerCamelCase : Union[str, Any] = image.img_to_array(_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = np.expand_dims(_SCREAMING_SNAKE_CASE ,axis=0 )
lowerCamelCase : Dict = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
lowerCamelCase : int = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
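# Example invocation (the script filename is an assumption; use the path this
# file actually lives at):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model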
| 48 | 1 |
"""simple docstring"""
__lowercase = '\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__lowercase = [{'type': 'code', 'content': INSTALL_CONTENT}]
__lowercase = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 357 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase = logging.get_logger(__name__)
__lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
__lowercase = {
'''gpt-neox-20b''': 2_048,
}
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Optional[int]="<|endoftext|>" , UpperCamelCase__ : Optional[int]="<|endoftext|>" , UpperCamelCase__ : Optional[int]="<|endoftext|>" , UpperCamelCase__ : Optional[int]=False , **UpperCamelCase__ : str , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
__UpperCamelCase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase__ ) != add_prefix_space:
__UpperCamelCase =getattr(UpperCamelCase__ , pre_tok_state.pop('''type''' ) )
__UpperCamelCase =add_prefix_space
__UpperCamelCase =pre_tok_class(**UpperCamelCase__ )
__UpperCamelCase =add_prefix_space
def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
__UpperCamelCase =self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def UpperCAmelCase_ ( self : int , UpperCamelCase__ : "Conversation" ) -> List[int]:
'''simple docstring'''
__UpperCamelCase =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [self.eos_token_id] )
if len(UpperCamelCase__ ) > self.model_max_length:
__UpperCamelCase =input_ids[-self.model_max_length :]
return input_ids
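# Minimal sketch of the conversation packing performed by the last method
# above: every turn is encoded, an EOS id is appended after each one, and the
# result is truncated from the left so only the most recent `model_max_length`
# tokens survive.
def pack_turns(turn_ids, eos_token_id, model_max_length):
    input_ids = []
    for ids in turn_ids:
        input_ids.extend(ids + [eos_token_id])
    if len(input_ids) > model_max_length:
        input_ids = input_ids[-model_max_length:]
    return input_ids

assert pack_turns([[1, 2], [3, 4, 5]], eos_token_id=0, model_max_length=4) == [3, 4, 5, 0]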
| 85 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
UpperCamelCase__ = logging.getLogger(__name__)
@dataclass
class a__ :
_a : str
_a : List[str]
_a : Optional[List[str]]
@dataclass
class a__ :
_a : List[int]
_a : List[int]
_a : Optional[List[int]] = None
_a : Optional[List[int]] = None
class a__ ( snake_case__ ):
_a : Optional[int] = """train"""
_a : int = """dev"""
_a : Tuple = """test"""
class a__ :
@staticmethod
def __SCREAMING_SNAKE_CASE( _A , _A ):
"""simple docstring"""
raise NotImplementedError
@staticmethod
def __SCREAMING_SNAKE_CASE( _A ):
"""simple docstring"""
raise NotImplementedError
@staticmethod
def __SCREAMING_SNAKE_CASE( _A , _A , _A , _A , _A=False , _A="[CLS]" , _A=1 , _A="[SEP]" , _A=False , _A=False , _A=0 , _A=0 , _A=-1_0_0 , _A=0 , _A=True , ):
"""simple docstring"""
__lowerCAmelCase = {label: i for i, label in enumerate(_A )}
__lowerCAmelCase = []
for ex_index, example in enumerate(_A ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("Writing example %d of %d" , _A , len(_A ) )
__lowerCAmelCase = []
__lowerCAmelCase = []
for word, label in zip(example.words , example.labels ):
__lowerCAmelCase = tokenizer.tokenize(_A )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(_A ) > 0:
tokens.extend(_A )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_A ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__lowerCAmelCase = tokenizer.num_special_tokens_to_add()
if len(_A ) > max_seq_length - special_tokens_count:
__lowerCAmelCase = tokens[: (max_seq_length - special_tokens_count)]
__lowerCAmelCase = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
__lowerCAmelCase = [sequence_a_segment_id] * len(_A )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
__lowerCAmelCase = [cls_token] + tokens
__lowerCAmelCase = [pad_token_label_id] + label_ids
__lowerCAmelCase = [cls_token_segment_id] + segment_ids
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(_A )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
__lowerCAmelCase = [1 if mask_padding_with_zero else 0] * len(_A )
# Zero-pad up to the sequence length.
__lowerCAmelCase = max_seq_length - len(_A )
if pad_on_left:
__lowerCAmelCase = ([pad_token] * padding_length) + input_ids
__lowerCAmelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
__lowerCAmelCase = ([pad_token_segment_id] * padding_length) + segment_ids
__lowerCAmelCase = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_A ) == max_seq_length
assert len(_A ) == max_seq_length
assert len(_A ) == max_seq_length
assert len(_A ) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***" )
logger.info("guid: %s" , example.guid )
logger.info("tokens: %s" , " ".join([str(_A ) for x in tokens] ) )
logger.info("input_ids: %s" , " ".join([str(_A ) for x in input_ids] ) )
logger.info("input_mask: %s" , " ".join([str(_A ) for x in input_mask] ) )
logger.info("segment_ids: %s" , " ".join([str(_A ) for x in segment_ids] ) )
logger.info("label_ids: %s" , " ".join([str(_A ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCAmelCase = None
features.append(
InputFeatures(
input_ids=_A , attention_mask=_A , token_type_ids=_A , label_ids=_A ) )
return features
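# Worked example of the label-alignment rule implemented above (illustrative
# values): only the first sub-token of each word keeps its real label id; the
# remaining sub-tokens receive the pad label id (-100, the default
# CrossEntropyLoss ignore index) so the loss skips them.
def align_labels(subtokens_per_word, word_label_ids, pad_token_label_id=-100):
    label_ids = []
    for n_subtokens, label_id in zip(subtokens_per_word, word_label_ids):
        label_ids.extend([label_id] + [pad_token_label_id] * (n_subtokens - 1))
    return label_ids

# "Washington" -> 3 sub-tokens with label 5, "," -> 1 sub-token with label 0
assert align_labels([3, 1], [5, 0]) == [5, -100, -100, 0]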
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class a__ ( snake_case__ ):
_a : List[InputFeatures]
_a : int = nn.CrossEntropyLoss().ignore_index
def __init__( self , _A , _A , _A , _A , _A , _A = None , _A=False , _A = Split.train , ):
"""simple docstring"""
__lowerCAmelCase = os.path.join(
_A , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(_A ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCAmelCase = cached_features_file + ".lock"
with FileLock(_A ):
if os.path.exists(_A ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__lowerCAmelCase = torch.load(_A )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__lowerCAmelCase = token_classification_task.read_examples_from_file(_A , _A )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCAmelCase = token_classification_task.convert_examples_to_features(
_A , _A , _A , _A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _A )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _A ):
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
class a__ :
_a : List[InputFeatures]
_a : int = -1_0_0
def __init__( self , _A , _A , _A , _A , _A , _A = None , _A=False , _A = Split.train , ):
"""simple docstring"""
__lowerCAmelCase = token_classification_task.read_examples_from_file(_A , _A )
# TODO clean up all this to leverage built-in features of tokenizers
__lowerCAmelCase = token_classification_task.convert_examples_to_features(
_A , _A , _A , _A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__lowerCAmelCase = tf.data.Dataset.from_generator(
_A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , (
{"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__lowerCAmelCase = tf.data.Dataset.from_generator(
_A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , (
{
"input_ids": tf.TensorShape([None] ),
"attention_mask": tf.TensorShape([None] ),
"token_type_ids": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _A ):
"""simple docstring"""
return self.features[i]
| 92 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = 'hf-internal-testing/tiny-random-t5'
lowerCAmelCase__ :List[Any] = AutoTokenizer.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :str = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Any = tokenizer('This is me' , return_tensors='pt' )
lowerCAmelCase__ :Dict = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowerCAmelCase__ :Optional[Any] = model.generate(**__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Any = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowerCAmelCase__ :Union[str, Any] = model_reloaded.generate(**__UpperCAmelCase )
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = 'hf-internal-testing/tiny-random-t5'
lowerCAmelCase__ :Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :str = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__UpperCAmelCase ):
model.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = model.reverse_bettertransformer()
model.save_pretrained(__UpperCAmelCase )
| 293 | 0 |
def knapsack(weights, values, number_of_items, max_weight, index):
    # Recursive 0/1 knapsack: at each index, take the better of skipping the
    # item or, if it still fits, taking it and recursing on the reduced capacity.
    if index == number_of_items:
        return 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
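# Usage example: with capacity 7 the optimum takes the items of weight 3 and 4
# (values 4 and 5) for a total value of 9.
assert knapsack([1, 3, 4, 5], [1, 4, 5, 7], 4, 7, 0) == 9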
| 162 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowerCAmelCase__ : Optional[Any] ='src/transformers'
lowerCAmelCase__ : int ='docs/source/en/tasks'
def a__ ( A__, A__, A__ ):
with open(A__, 'r', encoding='utf-8', newline='\n' ) as f:
SCREAMING_SNAKE_CASE_ : Tuple = f.readlines()
# Find the start prompt.
SCREAMING_SNAKE_CASE_ : Any = 0
while not lines[start_index].startswith(A__ ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ : int = start_index
while not lines[end_index].startswith(A__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ : Any =direct_transformers_import(TRANSFORMERS_PATH)
lowerCAmelCase__ : Dict ={
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
lowerCAmelCase__ : Union[str, Any] ={
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TASK_GUIDE_TO_MODELS[task_guide]
SCREAMING_SNAKE_CASE_ : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(A__, set() )
SCREAMING_SNAKE_CASE_ : Tuple = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def a__ ( A__, A__=False ):
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = _find_text_in_file(
filename=os.path.join(A__, A__ ), start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->', end_prompt='<!--End of the generated tip-->', )
SCREAMING_SNAKE_CASE_ : str = get_model_list_for_task(A__ )
if current_list != new_list:
if overwrite:
with open(os.path.join(A__, A__ ), 'w', encoding='utf-8', newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
' to fix this.' )
if __name__ == "__main__":
lowerCAmelCase__ : int =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowerCAmelCase__ : Union[str, Any] =parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 162 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] ="trajectory_transformer"
UpperCAmelCase_ : str =["past_key_values"]
UpperCAmelCase_ : Optional[Any] ={
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , UpperCAmelCase=100 , UpperCAmelCase=5 , UpperCAmelCase=1 , UpperCAmelCase=1 , UpperCAmelCase=249 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=25 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase=128 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0_006 , UpperCAmelCase=512 , UpperCAmelCase=0.02 , UpperCAmelCase=1E-12 , UpperCAmelCase=1 , UpperCAmelCase=True , UpperCAmelCase=1 , UpperCAmelCase=50256 , UpperCAmelCase=50256 , **UpperCAmelCase , ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = action_weight
__snake_case : int = reward_weight
__snake_case : List[Any] = value_weight
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[Any] = block_size
__snake_case : str = action_dim
__snake_case : Union[str, Any] = observation_dim
__snake_case : Tuple = transition_dim
__snake_case : Any = learning_rate
__snake_case : Dict = n_layer
__snake_case : int = n_head
__snake_case : Tuple = n_embd
__snake_case : str = embd_pdrop
__snake_case : Any = attn_pdrop
__snake_case : Tuple = resid_pdrop
__snake_case : Any = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : List[str] = kaiming_initializer_range
__snake_case : Tuple = use_cache
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
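# Minimal sketch of the `attribute_map` mechanism relied on above (a toy
# re-implementation for illustration; in transformers, `PretrainedConfig`
# implements this): generic attribute names such as `hidden_size` are
# transparently redirected to the model-specific name `n_embd`.
class _AttributeMapDemo:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd):
        self.n_embd = n_embd

    def __getattr__(self, name):  # only called for attributes not found normally
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

assert _AttributeMapDemo(n_embd=128).hidden_size == 128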
| 326 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : str =JukeboxTokenizer
UpperCAmelCase_ : Tuple ={
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
import torch
__snake_case : List[str] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
__snake_case : Union[str, Any] = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case : Optional[Any] = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
'''simple docstring'''
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 326 | 1 |
"""simple docstring"""
def stooge_sort(arr: list) -> list:
    '''simple docstring'''
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    '''simple docstring'''
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
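    # For illustration, a hypothetical run of the prompt above:
    #   Enter numbers separated by a comma:
    #   5, 3, 1, 4, 2
    #   [1, 2, 3, 4, 5]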
| 202 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
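    # With the lazy module wired up, a consumer can do, e.g. (sketch):
    #     from transformers.models.roformer import RoFormerConfig, RoFormerTokenizer
    # and the heavy torch/tf/flax submodules are only imported when a model class
    # is first accessed.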
| 202 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    """simple docstring"""

    def __init__(self, a=2, b=3, length=64, seed=None):
        '''simple docstring'''
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        '''simple docstring'''
        return self.length

    def __getitem__(self, i):
        '''simple docstring'''
        return {"x": self.x[i], "y": self.y[i]}
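# Sketch of typical consumption (length/batch size are assumed values, shown for
# illustration only):
#     loader = DataLoader(RegressionDataset(length=96, seed=0), batch_size=16)
#     batch = next(iter(loader))  # {"x": tensor of shape (16,), "y": tensor of shape (16,)}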
class RegressionModel4XPU(torch.nn.Module):
    """simple docstring"""

    def __init__(self, a=0, b=0, double_output=False):
        '''simple docstring'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        '''simple docstring'''
        if self.first_batch:
            print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''')
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    """simple docstring"""

    def __init__(self, a=0, b=0, double_output=False):
        '''simple docstring'''
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        '''simple docstring'''
        if self.first_batch:
            print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''')
            self.first_batch = False
        return x * self.a + self.b
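# Design note: RegressionDataset above generates y = a * x + b + noise with a=2, b=3
# by default, so a RegressionModel initialised at a=0, b=0 should converge towards
# roughly those values when trained on it.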
def mocked_dataloaders(accelerator, batch_size=16):
    '''simple docstring'''
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
| 97 |
'''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask:
    """simple docstring"""

    def __init__(self, task_performed, total):
        '''simple docstring'''
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        '''simple docstring'''
        # if all persons have been given a task, this arrangement counts once
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we do not assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        '''simple docstring'''
        # store, for each task, the list of persons able to perform it
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
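    # Reading the input: person 0 can take task 1, 3 or 4; person 1 can take
    # task 1, 2 or 5; person 2 can take task 3 or 4.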
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
    )
| 97 | 1 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
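# Shape sketch for checkpoint_version >= 2.0 (illustrative sizes only): a fused QKV
# weight of shape [num_heads * 3 * head_dim, hidden] is viewed as
# (num_heads, 3, head_dim, hidden), the head and split axes are swapped, and it is
# flattened back so rows end up grouped as [all Q heads, all K heads, all V heads].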
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For the LM head, transformers wants the matrix tied to the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
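# Typical invocation, matching the arguments defined in main() above (the path is
# illustrative):
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron/release/mp_rank_00/model_optim_rng.pt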
| 362 |
import warnings
from functools import wraps
from typing import Callable
def experimental(fn) -> Callable:
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
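# Usage sketch (hypothetical function, for illustration only):
#     @experimental
#     def new_feature():
#         ...
# Calling new_feature() then emits a UserWarning before delegating to the wrapped
# function.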
| 176 | 0 |
"""simple docstring"""
import sys
def matrix_chain_order(array):
    '''simple docstring'''
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    '''simple docstring'''
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    '''simple docstring'''
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
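    # For the array above (the classic CLRS example) the expected output is:
    #   No. of Operation required: 15125
    #   ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )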
| 46 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
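# Sketch of intended use with the datasets library ("dataset" and its "content"
# column are assumptions for illustration):
#     dataset.prepare_for_task(LanguageModeling(text_column="content"))
# casts/renames the "content" column to "text" via the column_mapping above.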
| 46 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
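# Both variants agree on simple cases, e.g. euclidean_distance((0, 0), (3, 4)) and
# euclidean_distance_no_np((0, 0), (3, 4)) both return 5.0 (the 3-4-5 triangle).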
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
| 355 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
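# The helper mirrors the model's defaults: every mask falls back to "attend
# everywhere except padding", so tests only need to supply input and decoder ids.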
class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
        """simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """simple docstring"""
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        """simple docstring"""
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'conversational': MaMaaaForConditionalGeneration,
'feature-extraction': MaMaaaModel,
'summarization': MaMaaaForConditionalGeneration,
'text2text-generation': MaMaaaForConditionalGeneration,
'translation': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
    def setUp(self):
        """simple docstring"""
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_decoder_model_past_with_large_inputs(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_encoder_decoder_model_standalone(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        """simple docstring"""
        return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M')
    def test_inference_no_head(self):
        """simple docstring"""
        model = MaMaaaModel.from_pretrained('facebook/m2m100_418M').to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        """simple docstring"""
        model = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M').to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        """simple docstring"""
        model = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M').to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M', src_lang='fr', tgt_lang='en')

        src_fr = [
            'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
            'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
            'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
            ' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
            ' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors='pt')

        hypotheses_batch = model.generate(
            input_ids=dct['input_ids'].to(torch_device),
            attention_mask=dct['attention_mask'].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id('en'),
        )

        expected_en = [
            'The NSA case highlights the total absence of intelligence debate',
            'I think there are two levels of response from the French government.',
            'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
            ' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
            ' communications in France.',
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 50 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 184 |
from __future__ import annotations
A : Union[str, Any] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    """simple docstring"""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
A : List[str] = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 184 | 1 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ['image_processor']
    image_processor_class = 'SamImageProcessor'

    def __init__(self, image_processor):
        """simple docstring"""
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
def __call__( self : Any , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase__ : List[str] , ) -> BatchEncoding:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.image_processor(
lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
# pop arguments that are not used in the foward but used nevertheless
_UpperCAmelCase : Optional[int] = encoding_image_processor["original_sizes"]
if hasattr(lowerCAmelCase__ , "numpy" ): # Checks if Torch or TF tensor
_UpperCAmelCase : List[Any] = original_sizes.numpy()
_UpperCAmelCase : Union[str, Any] = self._check_and_preprocess_points(
input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , input_boxes=lowerCAmelCase__ , )
_UpperCAmelCase : Union[str, Any] = self._normalize_and_convert(
lowerCAmelCase__ , lowerCAmelCase__ , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , input_boxes=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , )
return encoding_image_processor
    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        """simple docstring"""
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        """simple docstring"""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
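    # Example of the padding above: per-image point arrays of shapes (2, 2) and (3, 2) become
    # two (3, 2) arrays, the shorter one filled with `point_pad_value` rows and its label
    # array extended with a matching `point_pad_value`, so the batch stacks rectangularly.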
    def _normalize_coordinates(self, target_size: int, coords: np.ndarray, original_size, is_bounding_box=False) -> np.ndarray:
        """simple docstring"""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords
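    # Worked example: with original_size=(480, 640) and target_size=1024,
    # _get_preprocess_shape scales the longest edge to 1024, giving (new_h, new_w)=(768, 1024),
    # so a point (x=320, y=240) is rescaled to (320 * 1024/640, 240 * 768/480) = (512, 384).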
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        """simple docstring"""
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list of integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        """simple docstring"""
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))
    def post_process_masks(self, *args, **kwargs):
        """simple docstring"""
        return self.image_processor.post_process_masks(*args, **kwargs) | 350 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """simple docstring"""
    data: int
    next_node: Node | None
class SortedLinkedList:
    """simple docstring"""
    def __init__(self, ints: Iterable[int]) -> None:
        """simple docstring"""
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)
    def __iter__(self) -> Iterator[int]:
        """simple docstring"""
        node = self.head
        while node:
            yield node.data
            node = node.next_node
def __len__( self : Any ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
    def __str__(self) -> str:
        """simple docstring"""
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
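    # The demo below merges the two eight-element tuples into one ascending list, printing
    # "-11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10".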
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 17 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = '▁'
UpperCAmelCase_ = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
UpperCAmelCase_ = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
UpperCAmelCase_ = {
'facebook/s2t-small-librispeech-asr': 1024,
}
UpperCAmelCase_ = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
UpperCAmelCase_ = {'mustc': MUSTC_LANGS}
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : str = VOCAB_FILES_NAMES
a : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Union[str, Any] = MAX_MODEL_INPUT_SIZES
a : Union[str, Any] = ["input_ids", "attention_mask"]
a : List[int] = []
def __init__( self, __magic_name__, __magic_name__, __magic_name__="<s>", __magic_name__="</s>", __magic_name__="<pad>", __magic_name__="<unk>", __magic_name__=False, __magic_name__=False, __magic_name__=None, __magic_name__=None, __magic_name__ = None, **__magic_name__, ) -> None:
"""simple docstring"""
UpperCamelCase__ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__magic_name__, eos_token=__magic_name__, unk_token=__magic_name__, pad_token=__magic_name__, do_upper_case=__magic_name__, do_lower_case=__magic_name__, tgt_lang=__magic_name__, lang_codes=__magic_name__, sp_model_kwargs=self.sp_model_kwargs, **__magic_name__, )
UpperCamelCase__ : List[Any] = do_upper_case
UpperCamelCase__ : str = do_lower_case
UpperCamelCase__ : Union[str, Any] = load_json(__magic_name__ )
UpperCamelCase__ : Dict = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ : str = spm_file
UpperCamelCase__ : Any = load_spm(__magic_name__, self.sp_model_kwargs )
if lang_codes is not None:
UpperCamelCase__ : str = lang_codes
UpperCamelCase__ : Optional[int] = LANGUAGES[lang_codes]
UpperCamelCase__ : int = [f"<lang:{lang}>" for lang in self.langs]
UpperCamelCase__ : Tuple = {lang: self.sp_model.PieceToId(f"<lang:{lang}>" ) for lang in self.langs}
UpperCamelCase__ : Optional[int] = self.lang_tokens
UpperCamelCase__ : int = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
UpperCamelCase__ : int = {}
    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        """simple docstring"""
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        """simple docstring"""
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """simple docstring"""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
def UpperCamelCase__ ( self, __magic_name__ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(__magic_name__, out_type=__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__ ) -> int:
"""simple docstring"""
return self.encoder.get(__magic_name__, self.encoder[self.unk_token] )
def UpperCamelCase__ ( self, __magic_name__ ) -> str:
"""simple docstring"""
return self.decoder.get(__magic_name__, self.unk_token )
def UpperCamelCase__ ( self, __magic_name__ ) -> str:
"""simple docstring"""
UpperCamelCase__ : List[str] = []
UpperCamelCase__ : Tuple = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
UpperCamelCase__ : Optional[int] = self.sp_model.decode(__magic_name__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
UpperCamelCase__ : str = []
else:
current_sub_tokens.append(__magic_name__ )
UpperCamelCase__ : List[Any] = self.sp_model.decode(__magic_name__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
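    # Hedged example: with lang_codes="mustc" and tgt_lang="fr", prefix_tokens holds the id
    # of "<lang:fr>", so a single sequence is framed as [<lang:fr>] + token_ids + [eos].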
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Any = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.__dict__.copy()
UpperCamelCase__ : Union[str, Any] = None
return state
def __setstate__( self, __magic_name__ ) -> None:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
UpperCamelCase__ : Tuple = {}
UpperCamelCase__ : List[str] = load_spm(self.spm_file, self.sp_model_kwargs )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = None ) -> Tuple[str]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = Path(__magic_name__ )
        assert save_dir.is_dir(), f"{save_dir} should be a directory"
UpperCamelCase__ : List[Any] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
UpperCamelCase__ : Tuple = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder, __magic_name__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file, __magic_name__ )
elif not os.path.isfile(self.spm_file ):
with open(__magic_name__, '''wb''' ) as fi:
UpperCamelCase__ : int = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (str(__magic_name__ ), str(__magic_name__ ))
def lowerCAmelCase_ ( __UpperCAmelCase: str , __UpperCAmelCase: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
UpperCamelCase__ : List[Any] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def lowerCAmelCase_ ( __UpperCAmelCase: str ) -> Union[Dict, List]:
with open(__UpperCAmelCase , '''r''' ) as f:
return json.load(__UpperCAmelCase )
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: str ) -> None:
with open(__UpperCAmelCase , '''w''' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
| 201 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
UpperCAmelCase_ = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split('''.''' ):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = '''lm_head'''

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(f"Unused weights: {unused_weights}" )
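# Example of the renaming above: a fairseq key such as
# "encoder.layers.0.self_attn.k_proj.weight" matches the "self_attn.k_proj" entry of MAPPING,
# the layer index "0" is recovered via name.split(key)[0].split(".")[-2] and substituted for
# "*", so the value lands under "unispeech.encoder.layers.0.attention.k_proj.weight".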
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path )
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )

        hf_unispeech = UniSpeechForCTC(config )
    else:
        hf_unispeech = UniSpeechForPreTraining(config )

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )

    model = model[0].eval()

    recursively_load_weights(model , hf_unispeech , is_finetuned )

    hf_unispeech.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
UpperCAmelCase_ = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
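    # Hedged usage sketch (the script filename and all paths are placeholders):
    #   python convert_unispeech_checkpoint.py --checkpoint_path /path/to/checkpoint.pt \
    #       --pytorch_dump_folder_path ./unispeech-converted --not_finetuned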
| 201 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCamelCase_ :
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict=13 , lowerCAmelCase_ : Union[str, Any]=10 , lowerCAmelCase_ : Optional[Any]=3 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Tuple=32 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : int=37 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Any=10 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : List[Any]="divided_space_time" , lowerCAmelCase_ : Optional[int]=None , ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : Any = patch_size
UpperCAmelCase_ : Any = num_frames
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : int = use_labels
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Dict = attention_probs_dropout_prob
UpperCAmelCase_ : str = attention_type
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : Optional[int] = scope
UpperCAmelCase_ : Tuple = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
UpperCAmelCase_ : int = (image_size // patch_size) ** 2
UpperCAmelCase_ : int = (num_frames) * self.num_patches_per_frame + 1
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
UpperCAmelCase_ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Any = None
if self.use_labels:
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
UpperCAmelCase_ : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
UpperCAmelCase_ : Dict = self.num_labels
return config
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] ) -> int:
UpperCAmelCase_ : int = TimesformerModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : Dict = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int ) -> Tuple:
UpperCAmelCase_ : int = TimesformerForVideoClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ )
# verify the logits shape
UpperCAmelCase_ : Dict = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
UpperCAmelCase_ : Dict = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs
UpperCAmelCase_ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase_ (__lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
__magic_name__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
__magic_name__ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
__magic_name__ = False
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
UpperCAmelCase_ : str = TimesformerModelTester(self )
UpperCAmelCase_ : List[str] = ConfigTester(
self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any=False ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = copy.deepcopy(lowerCAmelCase_ )
if return_labels:
if model_class in get_values(lowerCAmelCase_ ):
UpperCAmelCase_ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : str = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[str] = model_class(lowerCAmelCase_ )
UpperCAmelCase_ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = TimesformerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
if not self.has_attentions:
pass
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[Any] = True
for model_class in self.all_model_classes:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.seq_length
UpperCAmelCase_ : Any = self.model_tester.num_frames
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : int = False
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
UpperCAmelCase_ : List[Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : List[Any] = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
UpperCAmelCase_ : List[str] = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
UpperCAmelCase_ : str = len(lowerCAmelCase_ )
# Check attention is always last and order is fine
UpperCAmelCase_ : int = True
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Any = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase_ ) )
UpperCAmelCase_ : List[Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
def check_hidden_states_output(lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
UpperCAmelCase_ : Any = outputs.hidden_states
UpperCAmelCase_ : List[str] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
UpperCAmelCase_ : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[str] = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : str = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
    video = np.load(file )
    return list(video )
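# Hedged note: the "eating_spaghetti.npy" asset decodes to an array of RGB frames (roughly
# (num_frames, height, width, 3) uint8); it is returned as a list of frames because that is
# the format the image processor expects, and the integration test below uses video[:8].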
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
UpperCAmelCase_ : int = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
lowerCAmelCase_ )
UpperCAmelCase_ : str = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_video()
UpperCAmelCase_ : Tuple = image_processor(video[:8] , return_tensors="pt" ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**lowerCAmelCase_ )
# verify the logits
UpperCAmelCase_ : Optional[Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 365 |
"""simple docstring"""
from math import factorial
def solution(num: int = 1_00) -> int:
    return sum(int(x) for x in str(factorial(num)))
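# With the default num = 100 this is Project Euler problem 20; the digit sum of 100! is 648.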
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 253 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : Optional[Any] =logging.get_logger(__name__)
a__ : Union[str, Any] ={
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class snake_case ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] ="table-transformer"
SCREAMING_SNAKE_CASE_ : Any =["past_key_values"]
SCREAMING_SNAKE_CASE_ : List[str] ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Union[str, Any] , __A : List[Any]=True , __A : Dict=None , __A : Union[str, Any]=3 , __A : Optional[int]=1_0_0 , __A : List[Any]=6 , __A : Dict=2_0_4_8 , __A : int=8 , __A : Tuple=6 , __A : Union[str, Any]=2_0_4_8 , __A : Union[str, Any]=8 , __A : int=0.0 , __A : Dict=0.0 , __A : Tuple=True , __A : int="relu" , __A : Any=2_5_6 , __A : Tuple=0.1 , __A : int=0.0 , __A : List[str]=0.0 , __A : str=0.02 , __A : List[str]=1.0 , __A : Any=False , __A : Tuple="sine" , __A : int="resnet50" , __A : Optional[int]=True , __A : Optional[Any]=False , __A : str=1 , __A : List[Any]=5 , __A : Optional[Any]=2 , __A : int=1 , __A : List[str]=1 , __A : Union[str, Any]=5 , __A : str=2 , __A : int=0.1 , **__A : Dict , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
__UpperCamelCase = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(__A , __A ):
__UpperCamelCase = backbone_config.get('model_type' )
__UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
__UpperCamelCase = config_class.from_dict(__A )
# set timm attributes to None
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None, None, None
__UpperCamelCase = use_timm_backbone
__UpperCamelCase = backbone_config
__UpperCamelCase = num_channels
__UpperCamelCase = num_queries
__UpperCamelCase = d_model
__UpperCamelCase = encoder_ffn_dim
__UpperCamelCase = encoder_layers
__UpperCamelCase = encoder_attention_heads
__UpperCamelCase = decoder_ffn_dim
__UpperCamelCase = decoder_layers
__UpperCamelCase = decoder_attention_heads
__UpperCamelCase = dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = activation_function
__UpperCamelCase = init_std
__UpperCamelCase = init_xavier_std
__UpperCamelCase = encoder_layerdrop
__UpperCamelCase = decoder_layerdrop
__UpperCamelCase = encoder_layers
__UpperCamelCase = auxiliary_loss
__UpperCamelCase = position_embedding_type
__UpperCamelCase = backbone
__UpperCamelCase = use_pretrained_backbone
__UpperCamelCase = dilation
# Hungarian matcher
__UpperCamelCase = class_cost
__UpperCamelCase = bbox_cost
__UpperCamelCase = giou_cost
# Loss coefficients
__UpperCamelCase = mask_loss_coefficient
__UpperCamelCase = dice_loss_coefficient
__UpperCamelCase = bbox_loss_coefficient
__UpperCamelCase = giou_loss_coefficient
__UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=__A , **__A )
@property
def _lowerCamelCase ( self : int ):
return self.encoder_attention_heads
@property
def _lowerCamelCase ( self : Any ):
return self.d_model
class snake_case ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] =version.parse("1.11" )
@property
def _lowerCamelCase ( self : Dict ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def _lowerCamelCase ( self : Union[str, Any] ):
return 1e-5
@property
def _lowerCamelCase ( self : Union[str, Any] ):
return 1_2
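# Hedged note: the two properties above are the standard OnnxConfig hooks
# (atol_for_validation and default_onnx_opset), i.e. exported table-transformer graphs are
# validated with an absolute tolerance of 1e-5 and emitted with ONNX opset 12.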
| 53 |
def reverse_long_words(sentence: str) -> str:
    """simple docstring"""
    return " ".join(
        word[::-1] if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
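    # Expected output: only words longer than four characters are reversed, so the demo
    # prints "Hey fellow warriors".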
| 334 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Dict = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 352 |
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    """simple docstring"""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
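# Derivation behind the loop (Project Euler problem 135): with x, y, z = a + d, a, a - d a
# decreasing arithmetic progression, n = x**2 - y**2 - z**2 reduces to a * (4*d - a); for
# each divisor a (= first_term) of n, d = (a + n/a) / 4 must be a positive integer with
# a > d (so z > 0) and a < 4*d (so n > 0), and n is counted once per valid (a, d) pair.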
if __name__ == "__main__":
print(F'{solution() = }') | 317 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['OwlViTFeatureExtractor']
lowercase = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 178 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent) -> None:
        self.parent = parent
    def prepare_feat_extract_dict(self) -> dict:
        return {}
def get_html_strings():
    html_string_a = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_b = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_a, html_string_b]
@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None
    def setUp(self) -> None:
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self) -> None:
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
        expected_xpaths = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
        expected_xpaths = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 9 | 0 |
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        # shared encoder plus the similarity/normalization modules used below
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        # helper is unused in this snippet; kept for parity with the surrounding API
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        # temperature-scaled cosine similarity turned into a distribution
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
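        # Hedged reading of forward(): every query token embedding is scored against the
        # support embeddings taken at the entity start/end marker positions, and the summed,
        # softmax-normalized scores give per-token start and end probability vectors.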
| 357 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :List[Any] ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCamelCase__( self :int ) -> Optional[Any]:
a__ , a__ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' ,from_pt=__snake_case ,dtype=jnp.bfloataa )
a__ , a__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,controlnet=__snake_case ,from_pt=__snake_case ,dtype=jnp.bfloataa )
a__ = controlnet_params
a__ = 'bird'
a__ = jax.device_count()
a__ = pipe.prepare_text_inputs([prompts] * num_samples )
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
a__ = pipe.prepare_image_inputs([canny_image] * num_samples )
a__ = jax.random.PRNGKey(0 )
a__ = jax.random.split(__snake_case ,jax.device_count() )
a__ = replicate(__snake_case )
a__ = shard(__snake_case )
a__ = shard(__snake_case )
a__ = pipe(
prompt_ids=__snake_case ,image=__snake_case ,params=__snake_case ,prng_seed=__snake_case ,num_inference_steps=50 ,jit=__snake_case ,).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
a__ = images[0, 2_53:2_56, 2_53:2_56, -1]
a__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
a__ = jnp.array(
[0.16_79_69, 0.11_66_99, 0.08_15_43, 0.15_42_97, 0.13_28_12, 0.10_88_87, 0.16_99_22, 0.16_99_22, 0.20_50_78] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ , a__ = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' ,from_pt=__snake_case ,dtype=jnp.bfloataa )
a__ , a__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,controlnet=__snake_case ,from_pt=__snake_case ,dtype=jnp.bfloataa )
a__ = controlnet_params
a__ = 'Chef in the kitchen'
a__ = jax.device_count()
a__ = pipe.prepare_text_inputs([prompts] * num_samples )
a__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
a__ = pipe.prepare_image_inputs([pose_image] * num_samples )
a__ = jax.random.PRNGKey(0 )
a__ = jax.random.split(__snake_case ,jax.device_count() )
a__ = replicate(__snake_case )
a__ = shard(__snake_case )
a__ = shard(__snake_case )
a__ = pipe(
prompt_ids=__snake_case ,image=__snake_case ,params=__snake_case ,prng_seed=__snake_case ,num_inference_steps=50 ,jit=__snake_case ,).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
a__ = images[0, 2_53:2_56, 2_53:2_56, -1]
a__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
a__ = jnp.array(
[[0.27_14_84, 0.26_17_19, 0.27_53_91, 0.27_73_44, 0.27_92_97, 0.29_10_16, 0.29_49_22, 0.30_27_34, 0.30_27_34]] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 109 | 0 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_snake_case : List[str] = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)
| 123 | """simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
        begin_error_msg = ' '.join(str(e).split(' ')[:-1])
        full_error_msg = ''
        depreciated_args = eval(str(e).split(' ')[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
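    # Hedged usage sketch (model name and flag values are illustrative; the flags come from
    # TensorFlowBenchmarkArguments):
    #   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128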
| 69 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase :List[Any] = logging.get_logger(__name__)
lowerCamelCase :Any = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
return inputs | 359 |
'''simple docstring'''
def solution() -> int:
    """Counts the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000) - Project Euler problem 19."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
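

# Sanity check: the accepted answer to Project Euler problem 19 is 171,
# so solution() should return 171.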
if __name__ == "__main__":
print(solution()) | 135 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
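

# To run just these tests, something like the following should work (the path
# assumes the usual transformers repository layout):
#   python -m pytest tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py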
| 77 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
{"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0},
]
DATA2 = [
{"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0},
{"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0},
]
DATA_DICT_OF_LISTS = {
"""col_1""": ["""0""", """1""", """2""", """3"""],
"""col_2""": [0, 1, 2, 3],
"""col_3""": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0},
{"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1},
]
DATA_STR = [
{"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0},
{"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0},
{"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0},
{"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory, dataset_dict):
    dataset = datasets.Dataset.from_dict(dataset_dict)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
SCREAMING_SNAKE_CASE_: Optional[Any] ={"""data""": DATA}
with open(lowercase , """w""" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
SCREAMING_SNAKE_CASE_: Tuple ={"""data""": DATA_DICT_OF_LISTS}
with open(lowercase , """w""" ) as f:
json.dump(lowercase , lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowercase , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
import gzip
SCREAMING_SNAKE_CASE_: Any =str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowercase , """rb""" ) as orig_file:
with gzip.open(lowercase , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
import gzip
SCREAMING_SNAKE_CASE_: int =str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowercase , """rb""" ) as orig_file:
with gzip.open(lowercase , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""nested""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowercase , """w""" ) as f:
f.add(lowercase , arcname=os.path.basename(lowercase ) )
f.add(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowercase , """w""" ) as f:
f.add(lowercase , arcname=os.path.join("""nested""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =["""0""", """1""", """2""", """3"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =["""0""", """1""", """2""", """3"""]
SCREAMING_SNAKE_CASE_: Dict =str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =["""0""", """1""", """2""", """3"""]
SCREAMING_SNAKE_CASE_: List[str] =tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowercase , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
f.write(lowercase , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase ) ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowercase , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[str] ="""\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
SCREAMING_SNAKE_CASE_: List[Any] =str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(lowercase )
return path
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ):
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ):
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Any =tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowercase , """w""" ) as f:
f.write(lowercase , arcname=os.path.basename(lowercase ) )
f.write(lowercase , arcname=os.path.basename(lowercase ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="session")
def data_dir(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
return data_dir
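

# Usage sketch (hypothetical consumer test): pytest injects session fixtures by
# name, so a test can simply take `data_dir` as an argument.
#
# def test_data_dir_layout(data_dir):
#     assert (data_dir / "subdir" / "train.txt").exists()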
| 173 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
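

# Quick illustration of the two helpers above:
#   bytes_to_unicode() maps every byte 0-255 to a printable unicode character,
#   e.g. bytes_to_unicode()[32] == "Ġ" (the space marker used by byte-level BPE).
#   get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}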
class LongformerTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for Longformer (same algorithm as the RoBERTa tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
return (text, kwargs) | 354 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
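

# A couple of concrete renames performed by replace_key (illustrative):
#   replace_key("y_emb.weight")                      -> "metadata_embedding.weight"
#   replace_key("vqvae.bottleneck.level_blocks.0.k") -> "vqvae.bottleneck.level_blocks.0.codebook"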
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        # The target key names below are reconstructed from the upstream
        # conversion script; the dump had dropped the left-hand sides.
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
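

# Example invocation (the script filename is illustrative):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted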
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 228 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def UpperCAmelCase_ ( self : Optional[int] , padding : Union[bool, str, PaddingStrategy] = False , max_length : Optional[int] = None ) -> PaddingStrategy:
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
        return padding_strategy
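# --- Illustrative sketch (added; not part of the original file) ---
# How the `pad_to_multiple_of` branches above round a target length up to
# the next multiple: 30 with multiple 8 becomes 32, while an exact multiple
# is left unchanged. `_round_up_to_multiple` is a hypothetical helper name.
def _round_up_to_multiple(max_length: int, multiple: int) -> int:
    if max_length % multiple == 0:
        return max_length
    return ((max_length // multiple) + 1) * multiple


assert _round_up_to_multiple(30, 8) == 32
assert _round_up_to_multiple(32, 8) == 32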
| 54 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowercase__ ( TestCase ):
    '''simple docstring'''

    def _no_encoding_on_file_open( self, filepath ) -> Union[str, Any]:
        """simple docstring"""
        with open(filepath, encoding='''utf-8''' ) as input_file:
            regexp = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
            input_text = input_file.read()
            match = regexp.search(input_text )
        return match

    def _no_print_statements( self, filepath ) -> Any:
        """simple docstring"""
        with open(filepath, encoding='''utf-8''' ) as input_file:
            regexp = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''', re.DOTALL )
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text )
            matches = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open( self ) -> List[Any]:
        """simple docstring"""
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset ) ):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" )

    def test_no_print_statements( self ) -> Dict:
        """simple docstring"""
        dataset_paths = Path('''./datasets''' )
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py''' ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset ) ):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 201 | 0 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple( max_perimeter : int )-> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution( max_perimeter : int = 1_000 )-> int:
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'Perimeter {solution()} has maximum solutions')
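# --- Illustrative check (added; not in the original file) ---
# Perimeter 120 is the classic Project Euler 39 example: exactly three
# integer right triangles, (20, 48, 52), (24, 45, 51) and (30, 40, 50).
assert pythagorean_triple(120)[120] == 3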
| 80 |
"""simple docstring"""
from math import factorial, pi
def maclaurin_sin( theta : float , accuracy : int = 30 )-> float:
    if not isinstance(theta , (int, float) ):
        raise ValueError('maclaurin_sin() requires either an int or float for theta' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy' )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )


def maclaurin_cos( theta : float , accuracy : int = 30 )-> float:
    if not isinstance(theta , (int, float) ):
        raise ValueError('maclaurin_cos() requires either an int or float for theta' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy' )
    theta = float(theta )
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
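# --- Illustrative check (added; not in the original file) ---
# After the range reduction (theta mod 2*pi) above, a 30-term Maclaurin
# series agrees with math.sin/math.cos to well below 1e-9.
if __name__ == "__main__":
    from math import cos, sin

    assert abs(maclaurin_sin(1_0) - sin(1_0)) < 1e-9
    assert abs(maclaurin_cos(1_0) - cos(1_0)) < 1e-9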
| 80 | 1 |
"""simple docstring"""
import requests
def send_slack_message( message_body : str , slack_url : str ) -> None:
    '''simple docstring'''
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url , json={'text': message_body} , headers=headers )
    if response.status_code != 2_00:
        error_message = (
            'Request to slack returned an error '
            F"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
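# --- Illustrative note (added; not in the original file) ---
# A webhook URL is issued per channel from Slack's incoming-webhook setup
# page linked above; with a real (here hypothetical) URL the call becomes:
# send_slack_message("deploy finished", "https://hooks.slack.com/services/T000/B000/XXXX")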
| 172 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_a : Tuple= logging.get_logger(__name__)
_a : str= {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_a : Optional[int]= {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
_a : Tuple= {"facebook/blenderbot-3B": 128}
class UpperCamelCase ( lowercase ):
UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : List[Any] = ["""input_ids""", """attention_mask"""]
UpperCAmelCase : Optional[int] = BlenderbotTokenizer
def __init__(self : int , _A : Tuple=None , _A : str=None , _A : Union[str, Any]=None , _A : str="replace" , _A : List[Any]="<s>" , _A : List[Any]="</s>" , _A : Optional[int]="</s>" , _A : List[str]="<s>" , _A : Union[str, Any]="<unk>" , _A : Any="<pad>" , _A : str="<mask>" , _A : Union[str, Any]=False , _A : Optional[Any]=True , **_A : Optional[int] , ) -> int:
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
__snake_case : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , _A) != add_prefix_space:
__snake_case : Dict = getattr(_A , pre_tok_state.pop('type'))
__snake_case : int = add_prefix_space
__snake_case : Optional[int] = pre_tok_class(**_A)
__snake_case : str = add_prefix_space
__snake_case : Dict = 'post_processor'
__snake_case : Optional[int] = getattr(self.backend_tokenizer , _A , _A)
if tokenizer_component_instance:
__snake_case : Any = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__snake_case : int = tuple(state['sep'])
if "cls" in state:
__snake_case : int = tuple(state['cls'])
__snake_case : Any = False
if state.get('add_prefix_space' , _A) != add_prefix_space:
__snake_case : int = add_prefix_space
__snake_case : Dict = True
if state.get('trim_offsets' , _A) != trim_offsets:
__snake_case : int = trim_offsets
__snake_case : Dict = True
if changes_to_apply:
__snake_case : List[str] = getattr(_A , state.pop('type'))
__snake_case : Optional[int] = component_class(**_A)
setattr(self.backend_tokenizer , _A , _A)
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowercase (self : Optional[int]) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def _lowercase (self : Union[str, Any] , _A : List[Any]) -> List[Any]:
__snake_case : List[str] = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else value
__snake_case : Optional[int] = value
def _lowercase (self : Tuple , *_A : int , **_A : Union[str, Any]) -> BatchEncoding:
__snake_case : List[str] = kwargs.get('is_split_into_words' , _A)
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_A , **_A)
def _lowercase (self : Any , *_A : Union[str, Any] , **_A : Union[str, Any]) -> BatchEncoding:
__snake_case : Tuple = kwargs.get('is_split_into_words' , _A)
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_A , **_A)
def _lowercase (self : Optional[Any] , _A : str , _A : Optional[str] = None) -> Tuple[str]:
__snake_case : List[str] = self._tokenizer.model.save(_A , name=_A)
return tuple(_A)
def _lowercase (self : Any , _A : List[int] , _A : Optional[List[int]] = None) -> List[int]:
__snake_case : List[str] = [self.sep_token_id]
__snake_case : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowercase (self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None) -> Tuple:
return token_ids_a + [self.eos_token_id]
def _lowercase (self : Optional[int] , _A : "Conversation") -> List[int]:
__snake_case : str = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done inside Blenderbot
inputs.append(' ' + text)
else:
# Generated responses should contain them already.
inputs.append(_A)
__snake_case : Union[str, Any] = ' '.join(_A)
__snake_case : List[str] = self.encode(_A)
if len(_A) > self.model_max_length:
__snake_case : str = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
return input_ids
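# --- Illustrative usage sketch (added; not in the original file) ---
# Typical round trip with the fast tokenizer defined above, through its
# public transformers name; commented out because it downloads the real
# facebook/blenderbot-3B checkpoint referenced in the URL maps.
# from transformers import BlenderbotTokenizerFast
# tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
# ids = tok(" Hello world")["input_ids"]
# print(tok.decode(ids))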
| 172 | 1 |
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']


def topological_sort( start , visited , sort ):
    """simple docstring"""
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
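    # --- Illustrative check (added; not in the original file) ---
    # The result is a postorder: every vertex appears after its outgoing
    # neighbours, so the start vertex 'a' finishes last.
    assert sort == ['c', 'd', 'e', 'b', 'a']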
| 93 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = ShapEImgaImgPipeline
_lowercase : Optional[Any] = ['''image''']
_lowercase : Optional[int] = ['''image''']
_lowercase : Optional[int] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
_lowercase : Tuple = False
@property
def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self: str ) -> List[str]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
return 8
@property
def lowerCamelCase_ ( self: int ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase__ = CLIPVisionModel(UpperCamelCase_ )
return model
@property
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = CLIPImageProcessor(
crop_size=224 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowercase__ = PriorTransformer(**UpperCamelCase_ )
return model
@property
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowercase__ = ShapERenderer(**UpperCamelCase_ )
return model
def lowerCamelCase_ ( self: str ) -> Any:
"""simple docstring"""
lowercase__ = self.dummy_prior
lowercase__ = self.dummy_image_encoder
lowercase__ = self.dummy_image_processor
lowercase__ = self.dummy_renderer
lowercase__ = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , )
lowercase__ = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[int]=0 ) -> Tuple:
"""simple docstring"""
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
if str(UpperCamelCase_ ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(UpperCamelCase_ )
else:
lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowercase__ = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def lowerCamelCase_ ( self: int ) -> str:
"""simple docstring"""
lowercase__ = '''cpu'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase_ )
lowercase__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
lowercase__ = output.images[0]
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase__ = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self: List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ = torch_device == '''cpu'''
lowercase__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , )
def lowerCamelCase_ ( self: List[str] ) -> str:
"""simple docstring"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase_ )
lowercase__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = 1
lowercase__ = 2
lowercase__ = self.get_dummy_inputs(UpperCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
lowercase__ = batch_size * [inputs[key]]
lowercase__ = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self: str ) -> str:
"""simple docstring"""
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowercase__ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowercase__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase__ = pipe(
UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
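# --- Illustrative sketch (added; not in the original file) ---
# A minimal stand-in for the `assert_mean_pixel_difference` helper used in
# the slow test above: the average absolute pixel difference must stay
# below a threshold. The real diffusers helper may differ in details.
def _mean_pixel_difference_ok(image, expected_image, expected_max_diff=10):
    import numpy as np

    avg_diff = np.abs(image.astype(np.float64) - expected_image.astype(np.float64)).mean()
    return avg_diff < expected_max_diff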
| 93 | 1 |
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97 , 123 )]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the English language (how often they show up)
        frequencies = {
            'a': 0.0_8497,
            'b': 0.0_1492,
            'c': 0.0_2202,
            'd': 0.0_4253,
            'e': 0.1_1162,
            'f': 0.0_2228,
            'g': 0.0_2015,
            'h': 0.0_6094,
            'i': 0.0_7546,
            'j': 0.0_0153,
            'k': 0.0_1292,
            'l': 0.0_4025,
            'm': 0.0_2406,
            'n': 0.0_6749,
            'o': 0.0_7507,
            'p': 0.0_1929,
            'q': 0.0_0095,
            'r': 0.0_7587,
            's': 0.0_6327,
            't': 0.0_9356,
            'u': 0.0_2758,
            'v': 0.0_0978,
            'w': 0.0_2560,
            'x': 0.0_0150,
            'y': 0.0_1994,
            'z': 0.0_0077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ''
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
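# --- Illustrative check (added; not in the original file) ---
# With the default English frequencies, the classic sample 'crybd cdbsxq'
# should decode at shift 10; printed rather than asserted since the exact
# chi-squared value depends on the frequency table.
if __name__ == "__main__":
    best_shift, _, decoded = decrypt_caesar_with_chi_squared("crybd cdbsxq")
    print(best_shift, decoded)  # expected: 10 short string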
| 264 | """simple docstring"""
def solution( limit = 1000000 ) ->int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d > a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(f"{solution() = }")
| 290 | 0 |
def solution( power : int = 1000 ):
    '''simple docstring'''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num


if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
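# --- Illustrative check (added; not in the original file) ---
# 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26 (the Project Euler 16 example).
assert solution(15) == 26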
| 366 |
def cocktail_shaker_sort( unsorted : list ):
    '''simple docstring'''
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 29 | 0 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( sequences, padding_value, padding_side, sequence_length ) -> Optional[int]:
    '''simple docstring'''
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        truncated = tensor[:sequence_length]
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(truncated), :2] = truncated
            else:
                out_tensor[i, : len(truncated)] = truncated
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, sequence_length - len(truncated) :, :2] = truncated
            else:
                out_tensor[i, sequence_length - len(truncated) :] = truncated
    return out_tensor.tolist()
def is_punctuation( char: str ) -> bool:
    '''simple docstring'''
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class _lowerCAmelCase ( DataCollatorMixin ):
"""simple docstring"""
    tokenizer : PreTrainedTokenizerBase
    padding : Union[bool, str, PaddingStrategy] = True
    max_length : Optional[int] = None
    pad_to_multiple_of : Optional[int] = None
    label_pad_token_id : int = -1_0_0
    return_tensors : str = "pt"
    def torch_call( self : str, features : List[str] ):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt" if labels is None else None, )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length )
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length )
        batch = {k: torch.tensor(v, dtype=torch.int64 ) for k, v in batch.items()}
        return batch
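# --- Illustrative check (added; not in the original file) ---
# padding_tensor right-pads ragged label lists up to sequence_length:
assert padding_tensor([[1, 2], [3]], -100, "right", 4) == [
    [1, 2, -100, -100],
    [3, -100, -100, -100],
]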
| 17 |
alphabet_size = 256
# Modulus to hash a string
modulus = 100_0003


def rabin_karp( pattern : str , text : str ) -> bool:
    """simple docstring"""
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
def test_rabin_karp( ) -> None:
    """simple docstring"""
    # Test 1)
    pattern = "abc1abc12"
    text_a = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_b = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern , text_a ) and not rabin_karp(pattern , text_b )
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern , text )
    pattern = "Lue"
    assert not rabin_karp(pattern , text )
    print("Success." )
if __name__ == "__main__":
test_rabin_karp()
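# --- Illustrative check (added; not in the original file) ---
# The rolling update drops the leading character and appends the next one
# in O(1): hash("bc") == (hash("ab") - ord('a')*B) * B + ord('c')  (mod M).
_h_ab = (ord("a") * alphabet_size + ord("b")) % modulus
_h_bc = (ord("b") * alphabet_size + ord("c")) % modulus
assert ((_h_ab - ord("a") * alphabet_size) * alphabet_size + ord("c")) % modulus == _h_bc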
| 15 | 0 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
_snake_case = get_tests_dir('fixtures/dummy-config.json')
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = 0
def _lowerCamelCase ( self ):
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[Any] = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : str = AutoConfig.for_model("roberta" )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
_lowercase : List[Any] = os.path.join(_UpperCamelCase , "fake-roberta" )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with open(os.path.join(_UpperCamelCase , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
_lowercase : Union[str, Any] = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertEqual(type(_UpperCamelCase ) , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
try:
AutoConfig.register("custom" , _UpperCamelCase )
# Wrong model type will raise an error
with self.assertRaises(_UpperCamelCase ):
AutoConfig.register("model" , _UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCamelCase ):
AutoConfig.register("bert" , _UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowercase : List[str] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_UpperCamelCase )
_lowercase : Union[str, Any] = AutoConfig.from_pretrained(_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _lowerCamelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCamelCase , "bert-base is not a local folder and is not a valid model identifier" ):
_lowercase : Dict = AutoConfig.from_pretrained("bert-base" )
def _lowerCamelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCamelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_lowercase : List[Any] = AutoConfig.from_pretrained(_UpperCamelCase , revision="aaaaaa" )
def _lowerCamelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCamelCase , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
_lowercase : List[str] = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _lowerCamelCase ( self ):
"""simple docstring"""
with self.assertRaises(_UpperCamelCase ):
_lowercase : Tuple = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCamelCase ):
_lowercase : Optional[int] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=_UpperCamelCase )
_lowercase : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=_UpperCamelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_UpperCamelCase )
_lowercase : Any = AutoConfig.from_pretrained(_UpperCamelCase , trust_remote_code=_UpperCamelCase )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def _lowerCamelCase ( self ):
"""simple docstring"""
class a__ ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : Dict = 'new-model'
try:
AutoConfig.register("new-model" , _UpperCamelCase )
# If remote code is not set, the default is to use local
_lowercase : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
_lowercase : str = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=_UpperCamelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
_lowercase : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=_UpperCamelCase )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 199 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class a__ ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : List[Any] = ['image_processor', 'tokenizer']
_SCREAMING_SNAKE_CASE : str = 'OwlViTImageProcessor'
_SCREAMING_SNAKE_CASE : List[str] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ):
"""simple docstring"""
_lowercase : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCamelCase , )
_lowercase : Optional[int] = kwargs.pop("feature_extractor" )
_lowercase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="max_length" , _UpperCamelCase="np" , **_UpperCamelCase ):
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(_UpperCamelCase , _UpperCamelCase ) or (isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(text[0] , _UpperCamelCase )):
_lowercase : int = [self.tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(text[0] , _UpperCamelCase ):
_lowercase : str = []
# Maximum number of queries across batch
_lowercase : str = max([len(_UpperCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_UpperCamelCase ) != max_num_queries:
_lowercase : List[Any] = t + [" "] * (max_num_queries - len(_UpperCamelCase ))
_lowercase : Tuple = self.tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
encodings.append(_UpperCamelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_lowercase : List[Any] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_lowercase : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_lowercase : Union[str, Any] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_lowercase : int = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_lowercase : int = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_lowercase : Dict = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_lowercase : Optional[int] = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_lowercase : List[str] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_lowercase : Optional[int] = BatchEncoding()
_lowercase : List[Any] = input_ids
_lowercase : Dict = attention_mask
if query_images is not None:
_lowercase : int = BatchEncoding()
_lowercase : Any = self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase ).pixel_values
_lowercase : Any = query_pixel_values
if images is not None:
_lowercase : str = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
if text is not None and images is not None:
_lowercase : List[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_lowercase : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCamelCase ) , tensor_type=_UpperCamelCase )
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.image_processor.post_process(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_object_detection(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCamelCase , )
return self.image_processor
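# --- Illustrative usage sketch (added; not in the original file) ---
# Typical call pattern for the processor above, through its public
# transformers name; commented out because it downloads a real checkpoint.
# from transformers import OwlViTProcessor
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")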
| 199 | 1 |
"""simple docstring"""
def solution( max_base : int = 10 , max_power : int = 22 ):
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
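# --- Illustrative check (added; not in the original file) ---
# 7**5 = 16807 is itself 5 digits long; counting all such n-digit n-th
# powers gives 49, the known Project Euler 63 answer.
assert len(str(7**5)) == 5
assert solution() == 49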
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""") | 74 | from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__a : Optional[Any] = logging.get_logger(__name__)
__a : List[str] = TypeVar("""DatasetType""", Dataset, IterableDataset)
def UpperCAmelCase ( lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = "first_exhausted" , ):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(lowercase ):
if not isinstance(lowercase , (Dataset, IterableDataset) ):
if isinstance(lowercase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(lowercase )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowercase ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowercase ).__name__}." )
if i == 0:
__lowercase , __lowercase = (
(Dataset, IterableDataset) if isinstance(lowercase , lowercase ) else (IterableDataset, Dataset)
)
elif not isinstance(lowercase , lowercase ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
lowercase , lowercase , lowercase , info=lowercase , split=lowercase , stopping_strategy=lowercase )
else:
return _interleave_iterable_datasets(
lowercase , lowercase , lowercase , info=lowercase , split=lowercase , stopping_strategy=lowercase )
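# --- Illustrative usage sketch (added; not in the original file) ---
# Round-robin mixing through the public wrapper of the helper above; names
# mirror the real `datasets` API, and the call is commented out since it
# needs the library installed.
# from datasets import Dataset, interleave_datasets
# d1 = Dataset.from_dict({"a": [0, 1, 2]})
# d2 = Dataset.from_dict({"a": [10, 11, 12]})
# mixed = interleave_datasets([d1, d2])  # rows alternate: 0, 10, 1, 11, ...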
def UpperCAmelCase ( lowercase , lowercase = None , lowercase = None , lowercase = 0 , ):
"""simple docstring"""
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(lowercase ):
if not isinstance(lowercase , (Dataset, IterableDataset) ):
if isinstance(lowercase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'''is an empty dataset dictionary.''' )
raise ValueError(
F"Dataset at position {i} has at least one split: {list(lowercase )}\n"
F"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(lowercase ) )}']" )
raise ValueError(
F"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(lowercase ).__name__}." )
if i == 0:
__lowercase , __lowercase = (
(Dataset, IterableDataset) if isinstance(lowercase , lowercase ) else (IterableDataset, Dataset)
)
elif not isinstance(lowercase , lowercase ):
raise ValueError(
F"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(lowercase , info=lowercase , split=lowercase , axis=lowercase )
else:
return _concatenate_iterable_datasets(lowercase , info=lowercase , split=lowercase , axis=lowercase ) | 210 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowercase__ = logging.getLogger()
lowercase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class A_ ( _snake_case ):
'''simple docstring'''
    def _create_dummy_data( self : Tuple , data_dir : str ) -> None:
        os.makedirs(data_dir , exist_ok=True )
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , f"""{split}.{field}""" ) , 'w' ) as f:
                    f.write(content )
    def _run_finetune( self : Dict , gpus : int , distributed_retriever : str = "pytorch" ) -> dict:
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir , 'output' )
        data_dir = os.path.join(tmp_dir , 'data' )
        self._create_dummy_data(data_dir=data_dir )
        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
        execute_subprocess_async(cmd , env=self.get_env() )
        metrics_save_path = os.path.join(output_dir , 'metrics.json' )
        with open(metrics_save_path ) as f:
            result = json.load(f )
return result
@require_torch_gpu
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
        result = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def UpperCAmelCase_ ( self : Any ) -> Dict:
        result = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
        result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def UpperCAmelCase_ ( self : Tuple ) -> Any:
        result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 280 |
'''simple docstring'''
def actual_power( a , b ):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )


def power( a , b ):
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
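# --- Illustrative check (added; not in the original file) ---
# Exponentiation by squaring halves b at every step; with b < 0 the result
# is the reciprocal: power(-2, -3) == 1 / (-2)**3.
assert actual_power(3, 4) == 81
assert power(-2, -3) == -0.125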
| 280 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
    _import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_yolos'''] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 326 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict( checkpoint_path : str ) -> Optional[Any]:
    sd = torch.load(checkpoint_path , map_location="cpu" )
    return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ) -> Dict:
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old BERT code didn't have `decoder.bias`; it is added separately here
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path ) -> List[Any]:
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params )
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
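# --- Illustrative invocation (added; not in the original file) ---
# Example command line for the converter above; the script filename is a
# placeholder, and the first argument must be one of ACCEPTABLE_CHECKPOINTS.
# python convert_visual_bert_checkpoint.py vqa_pre_trained.th ./visualbert-vqa-pre-trained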
| 326 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_maskformer"""] = ["""MaskFormerFeatureExtractor"""]
    _import_structure["""image_processing_maskformer"""] = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_maskformer"""] = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
    _import_structure["""modeling_maskformer_swin"""] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 365 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ : int =logging.get_logger(__name__)
A_ : Tuple ={
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
def __init__( self , a__=None , a__=9_00 , a__=20_48 , a__=6 , a__=20_48 , a__=8 , a__=6 , a__=10_24 , a__=8 , a__=0.0 , a__=True , a__="relu" , a__=2_56 , a__=0.1 , a__=0.0 , a__=0.0 , a__=0.02 , a__=1.0 , a__=True , a__=False , a__="sine" , a__=5 , a__=4 , a__=4 , a__=True , a__=3_00 , a__=True , a__=True , a__=1 , a__=5 , a__=2 , a__=1 , a__=1 , a__=5 , a__=2 , a__=0.1 , a__=0.25 , **a__ , ):
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_lowerCamelCase = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
else:
if isinstance(a__ , a__ ):
_lowerCamelCase = backbone_config.pop('model_type' )
_lowerCamelCase = CONFIG_MAPPING[backbone_model_type]
_lowerCamelCase = config_class.from_dict(a__ )
_lowerCamelCase = backbone_config
_lowerCamelCase = num_queries
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = d_model
_lowerCamelCase = encoder_ffn_dim
_lowerCamelCase = encoder_layers
_lowerCamelCase = encoder_attention_heads
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = dropout
_lowerCamelCase = attention_dropout
_lowerCamelCase = activation_dropout
_lowerCamelCase = activation_function
_lowerCamelCase = init_std
_lowerCamelCase = init_xavier_std
_lowerCamelCase = encoder_layerdrop
_lowerCamelCase = auxiliary_loss
_lowerCamelCase = position_embedding_type
# deformable attributes
_lowerCamelCase = num_feature_levels
_lowerCamelCase = encoder_n_points
_lowerCamelCase = decoder_n_points
_lowerCamelCase = two_stage
_lowerCamelCase = two_stage_num_proposals
_lowerCamelCase = with_box_refine
_lowerCamelCase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
_lowerCamelCase = class_cost
_lowerCamelCase = bbox_cost
_lowerCamelCase = giou_cost
# Loss coefficients
_lowerCamelCase = mask_loss_coefficient
_lowerCamelCase = dice_loss_coefficient
_lowerCamelCase = bbox_loss_coefficient
_lowerCamelCase = giou_loss_coefficient
_lowerCamelCase = eos_coefficient
_lowerCamelCase = focal_alpha
super().__init__(is_encoder_decoder=a__ , **a__ )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
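# Note (added): per the validation in __init__ above, two_stage=True requires
# with_box_refine=True; constructing the config with two_stage=True and
# with_box_refine=False raises a ValueError.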
| 80 | 0 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] ,A : List[str] ,A : str=13 ,A : str=7 ,A : List[str]=True ,A : Optional[int]=True ,A : str=True ,A : Dict=True ,A : Optional[int]=99 ,A : Optional[Any]=32 ,A : int=5 ,A : Dict=4 ,A : Optional[int]=37 ,A : Tuple="gelu" ,A : List[str]=0.1 ,A : List[str]=0.1 ,A : Any=1_28 ,A : str=32 ,A : Any=16 ,A : List[Any]=2 ,A : List[str]=0.02 ,A : Tuple=3 ,A : Optional[int]=4 ,A : Any=None ,):
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_labels
__A = num_choices
__A = scope
def UpperCamelCase_ ( self : Tuple ):
__A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__A = ids_tensor([self.batch_size] ,self.num_choices )
__A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : int ):
return NezhaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self : Tuple ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase_ ( self : str ,A : Tuple ,A : List[str] ,A : List[Any] ,A : Optional[Any] ,A : Optional[Any] ,A : int ,A : Optional[int] ):
__A = NezhaModel(config=A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A )
__A = model(A ,token_type_ids=A )
__A = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[int] ,A : Optional[int] ,A : Union[str, Any] ,A : Any ,A : Any ,A : Dict ,A : Union[str, Any] ,A : List[str] ,A : List[Any] ,A : Union[str, Any] ,):
__A = True
__A = NezhaModel(A )
model.to(A )
model.eval()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,encoder_hidden_states=A ,encoder_attention_mask=A ,)
__A = model(
A ,attention_mask=A ,token_type_ids=A ,encoder_hidden_states=A ,)
__A = model(A ,attention_mask=A ,token_type_ids=A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : int ,A : int ,A : Any ,A : List[str] ,A : Tuple ,A : Optional[int] ,A : Any ,A : Union[str, Any] ):
__A = NezhaForMaskedLM(config=A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Union[str, Any] ,A : List[str] ,A : List[Any] ,A : Tuple ,A : Union[str, Any] ,A : Tuple ,A : Union[str, Any] ,A : List[Any] ):
__A = NezhaForNextSentencePrediction(config=A )
model.to(A )
model.eval()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self : Dict ,A : int ,A : List[Any] ,A : Optional[Any] ,A : Union[str, Any] ,A : Any ,A : Tuple ,A : Any ):
__A = NezhaForPreTraining(config=A )
model.to(A )
model.eval()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,labels=A ,next_sentence_label=A ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self : Optional[int] ,A : int ,A : Any ,A : Optional[Any] ,A : Tuple ,A : Optional[Any] ,A : List[Any] ,A : List[Any] ):
__A = NezhaForQuestionAnswering(config=A )
model.to(A )
model.eval()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,start_positions=A ,end_positions=A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : List[Any] ,A : List[Any] ,A : Optional[Any] ,A : List[Any] ,A : Union[str, Any] ,A : Tuple ,A : Tuple ,A : List[str] ):
__A = self.num_labels
__A = NezhaForSequenceClassification(A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : str ,A : List[Any] ,A : Optional[Any] ,A : Union[str, Any] ,A : Tuple ,A : List[str] ,A : Union[str, Any] ,A : Optional[Any] ):
__A = self.num_labels
__A = NezhaForTokenClassification(config=A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Union[str, Any] ,A : List[str] ,A : Any ,A : str ,A : Optional[int] ,A : Tuple ,A : Union[str, Any] ,A : Any ):
__A = self.num_choices
__A = NezhaForMultipleChoice(config=A )
model.to(A )
model.eval()
__A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Dict ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
def UpperCamelCase_ ( self : Tuple ,A : Any ,A : Union[str, Any] ,A : int=False ):
__A = super()._prepare_for_class(A ,A ,return_labels=A )
if return_labels:
if model_class in get_values(A ):
__A = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=A )
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
return inputs_dict
def UpperCamelCase_ ( self : int ):
__A = NezhaModelTester(self )
__A = ConfigTester(self ,config_class=A ,hidden_size=37 )
def UpperCamelCase_ ( self : str ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Dict ):
__A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A )
def UpperCamelCase_ ( self : Dict ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A )
def UpperCamelCase_ ( self : int ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*A )
def UpperCamelCase_ ( self : Any ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A )
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def UpperCamelCase_ ( self : Dict ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@slow
def UpperCamelCase_ ( self : int ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = NezhaModel.from_pretrained(A )
self.assertIsNotNone(A )
@slow
@require_torch_gpu
def UpperCamelCase_ ( self : Tuple ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__A = True
__A = model_class(config=A )
__A = self._prepare_for_class(A ,A )
__A = torch.jit.trace(
A ,(inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(A ,os.path.join(A ,"bert.pt" ) )
__A = torch.jit.load(os.path.join(A ,"bert.pt" ) ,map_location=A )
loaded(inputs_dict["input_ids"].to(A ) ,inputs_dict["attention_mask"].to(A ) )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : List[Any] ):
__A = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
__A = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__A = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__A = model(A ,attention_mask=A )[0]
__A = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape ,A )
__A = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,A ,atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : str ):
__A = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
__A = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__A = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__A = model(A ,attention_mask=A )[0]
__A = torch.Size((1, 6, 2_11_28) )
self.assertEqual(output.shape ,A )
__A = torch.tensor(
[[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,A ,atol=1E-4 ) )
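# Note (added): the two integration tests above compare 3x3 output slices of
# the "sijunhe/nezha-cn-base" checkpoint against pinned values with atol=1e-4,
# the usual guard against numerical drift across torch versions and devices.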
| 15 |
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """simple docstring"""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """simple docstring"""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    """simple docstring"""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """simple docstring"""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    """simple docstring"""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """simple docstring"""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """simple docstring"""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
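# Note (added): this is textbook introsort — quicksort on median-of-3 pivots,
# a fallback to heap_sort once recursion depth exceeds 2 * ceil(log2(n)), and
# insertion_sort for runs below size_threshold (16) — giving an O(n log n)
# worst case while keeping quicksort's constants on typical inputs.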
| 15 | 1 |
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    # base64.b85encode packs 4 bytes into 5 printable characters
    # (the base85 variant used by git-style binary diffs)
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(encoded: bytes) -> str:
    return base64.b85decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
| 367 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : str = "student_t" , lowerCAmelCase__ : str = "nll" , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowerCAmelCase__ : Optional[Union[str, bool]] = "mean" , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : str = "gelu" , lowerCAmelCase__ : int = 6_4 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : int = 1_0_0 , lowerCAmelCase__ : float = 0.02 , lowerCAmelCase__ : Dict=True , **lowerCAmelCase__ : Tuple , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = prediction_length
_UpperCAmelCase : Optional[Any] = context_length or prediction_length
_UpperCAmelCase : Optional[Any] = distribution_output
_UpperCAmelCase : Union[str, Any] = loss
_UpperCAmelCase : Dict = input_size
_UpperCAmelCase : int = num_time_features
_UpperCAmelCase : Any = lags_sequence
_UpperCAmelCase : Dict = scaling
_UpperCAmelCase : Tuple = num_dynamic_real_features
_UpperCAmelCase : Dict = num_static_real_features
_UpperCAmelCase : Union[str, Any] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(lowerCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase : Optional[int] = cardinality
else:
_UpperCAmelCase : Optional[Any] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(lowerCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase : List[Any] = embedding_dimension
else:
_UpperCAmelCase : Optional[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
_UpperCAmelCase : str = num_parallel_samples
# Transformer architecture configuration
_UpperCAmelCase : Union[str, Any] = input_size * len(lowerCAmelCase__ ) + self._number_of_features
_UpperCAmelCase : str = d_model
_UpperCAmelCase : Optional[Any] = encoder_attention_heads
_UpperCAmelCase : Dict = decoder_attention_heads
_UpperCAmelCase : List[Any] = encoder_ffn_dim
_UpperCAmelCase : str = decoder_ffn_dim
_UpperCAmelCase : Dict = encoder_layers
_UpperCAmelCase : str = decoder_layers
_UpperCAmelCase : Any = dropout
_UpperCAmelCase : str = attention_dropout
_UpperCAmelCase : List[Any] = activation_dropout
_UpperCAmelCase : Dict = encoder_layerdrop
_UpperCAmelCase : Any = decoder_layerdrop
_UpperCAmelCase : Optional[Any] = activation_function
_UpperCAmelCase : Tuple = init_std
_UpperCAmelCase : List[str] = use_cache
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ )
    @property
    def _number_of_features(self) -> int:
        """simple docstring"""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 17 | 0 |
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
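# Note (added): the class above is a pure deprecation shim — it inherits all
# behavior from CLIPImageProcessor and only emits a FutureWarning on init.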
| 104 |
'''simple docstring'''
import torch
from torch import nn
class lowercase_ (nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Optional[int]=1 ,lowercase__ : Optional[Any]=False ):
super().__init__()
__lowercase = n_token
__lowercase = d_embed
__lowercase = d_proj
__lowercase = cutoffs + [n_token]
__lowercase = [0] + self.cutoffs
__lowercase = div_val
__lowercase = self.cutoffs[0]
__lowercase = len(self.cutoffs ) - 1
__lowercase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
__lowercase = nn.Parameter(torch.zeros(self.n_clusters ,self.d_embed ) )
__lowercase = nn.Parameter(torch.zeros(self.n_clusters ) )
__lowercase = nn.ModuleList()
__lowercase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowercase__ ,lowercase__ ) ) )
else:
self.out_projs.append(lowercase__ )
self.out_layers.append(nn.Linear(lowercase__ ,lowercase__ ) )
else:
for i in range(len(self.cutoffs ) ):
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowercase__ ,lowercase__ ) ) )
self.out_layers.append(nn.Linear(lowercase__ ,r_idx - l_idx ) )
__lowercase = keep_order
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Any ):
if proj is None:
__lowercase = nn.functional.linear(lowercase__ ,lowercase__ ,bias=lowercase__ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
__lowercase = nn.functional.linear(lowercase__ ,proj.t().contiguous() )
__lowercase = nn.functional.linear(lowercase__ ,lowercase__ ,bias=lowercase__ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Any=None ,lowercase__ : List[str]=False ):
if labels is not None:
# Shift so that tokens < n predict n
__lowercase = hidden[..., :-1, :].contiguous()
__lowercase = labels[..., 1:].contiguous()
__lowercase = hidden.view(-1 ,hidden.size(-1 ) )
__lowercase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
__lowercase = hidden.view(-1 ,hidden.size(-1 ) )
if self.n_clusters == 0:
__lowercase = self._compute_logit(lowercase__ ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
if labels is not None:
__lowercase = labels != -1_0_0
__lowercase = torch.zeros_like(lowercase__ ,dtype=hidden.dtype ,device=hidden.device )
__lowercase = (
-nn.functional.log_softmax(lowercase__ ,dim=-1 )[mask].gather(1 ,labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=-1 )
else:
# construct weights and biases
__lowercase , __lowercase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase = self.out_layers[0].weight[l_idx:r_idx]
__lowercase = self.out_layers[0].bias[l_idx:r_idx]
else:
__lowercase = self.out_layers[i].weight
__lowercase = self.out_layers[i].bias
if i == 0:
__lowercase = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
__lowercase = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(lowercase__ )
biases.append(lowercase__ )
__lowercase , __lowercase , __lowercase = weights[0], biases[0], self.out_projs[0]
__lowercase = self._compute_logit(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=1 )
if labels is None:
__lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
__lowercase = torch.zeros_like(lowercase__ ,dtype=hidden.dtype ,device=hidden.device )
__lowercase = 0
__lowercase = [0] + self.cutoffs
for i in range(len(lowercase__ ) - 1 ):
__lowercase , __lowercase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
__lowercase = (labels >= l_idx) & (labels < r_idx)
__lowercase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
__lowercase = labels.index_select(0 ,lowercase__ ) - l_idx
__lowercase = head_logprob.index_select(0 ,lowercase__ )
__lowercase = hidden.index_select(0 ,lowercase__ )
else:
__lowercase = hidden
if i == 0:
if labels is not None:
__lowercase = head_logprob_i.gather(1 ,target_i[:, None] ).squeeze(1 )
else:
__lowercase = head_logprob[:, : self.cutoffs[0]]
else:
__lowercase , __lowercase , __lowercase = weights[i], biases[i], self.out_projs[i]
__lowercase = self._compute_logit(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=1 )
__lowercase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
__lowercase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 ,target_i[:, None] ).squeeze(1 )
else:
__lowercase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
__lowercase = logprob_i
if labels is not None:
if (hasattr(self ,'''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 ,lowercase__ ,-logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Union[str, Any] ):
if self.n_clusters == 0:
__lowercase = self._compute_logit(lowercase__ ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
return nn.functional.log_softmax(lowercase__ ,dim=-1 )
else:
# construct weights and biases
__lowercase , __lowercase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
__lowercase , __lowercase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowercase = self.out_layers[0].weight[l_idx:r_idx]
__lowercase = self.out_layers[0].bias[l_idx:r_idx]
else:
__lowercase = self.out_layers[i].weight
__lowercase = self.out_layers[i].bias
if i == 0:
__lowercase = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
__lowercase = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(lowercase__ )
biases.append(lowercase__ )
__lowercase , __lowercase , __lowercase = weights[0], biases[0], self.out_projs[0]
__lowercase = self._compute_logit(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=1 )
__lowercase = [0] + self.cutoffs
for i in range(len(lowercase__ ) - 1 ):
__lowercase , __lowercase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
__lowercase = head_logprob[:, : self.cutoffs[0]]
else:
__lowercase , __lowercase , __lowercase = weights[i], biases[i], self.out_projs[i]
__lowercase = self._compute_logit(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = nn.functional.log_softmax(lowercase__ ,dim=1 )
__lowercase = head_logprob[:, -i] + tail_logprob_i
__lowercase = logprob_i
return out
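# Note (added): the module above is the Transfo-XL-style projected adaptive
# softmax: tokens below cutoffs[0] form a full-resolution "head" cluster
# scored at d_embed, while rarer tokens fall into tail clusters whose
# embedding width shrinks by div_val**i, replacing one huge d_embed x n_token
# matmul with a small head matmul plus cheap per-cluster projections.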
| 104 | 1 |
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


# Class and test names reconstructed from the method bodies; they follow the
# standard transformers feature-extraction test mixin pattern.
class FeatureExtractionSavingTestMixin:
    '''simple docstring'''

    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
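# Usage sketch (added; the concrete class names are illustrative): a real test
# module mixes this in and supplies the two attributes the mixin reads, e.g.
#
#   class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = MyFeatureExtractor
#       feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000}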
| 145 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    '''simple docstring'''
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)  # n // 2 keeps n integral
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
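# Why the identity holds (added): by Fermat's little theorem, for prime p and
# gcd(b, p) == 1 we have b**(p - 1) % p == 1, so b**(p - 2) % p is the modular
# inverse of b. binary_exponentiation computes it in O(log p) multiplications
# instead of the ~p implied by naive repeated multiplication.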
| 145 | 1 |
from ....utils import logging


logger = logging.get_logger(__name__)


# Class and parameter names reconstructed from the method body; this matches
# the deprecated MMBT multimodal config pattern (an assumption, since the
# original identifiers were mangled).
class MMBTConfig:
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        """simple docstring"""
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
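# Note (added): the config wraps an existing text config by adopting its
# __dict__ wholesale, then layers the modality-specific modal_hidden_size
# (and the optional num_labels) on top.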
| 327 |
'''simple docstring'''
import math


class Graph:
    '''simple docstring'''

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        """simple docstring"""
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        """simple docstring"""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """simple docstring"""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """simple docstring"""
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
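# Note (added): floyd_warshall() is the classic O(n^3) all-pairs shortest-path
# DP; for the edges built above, show_min(1, 4) returns 11 (path 1 -> 3 -> 4,
# 5 + 6) and show_min(0, 3) returns 16 (path 0 -> 2 -> 3, 9 + 7).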
| 211 | 0 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    '''simple docstring'''
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 252 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
def __init__( self , UpperCamelCase_=17 , UpperCamelCase_=4 , UpperCamelCase_=128 , UpperCamelCase_=4096 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=1024 , UpperCamelCase_=3 , UpperCamelCase_=1 , UpperCamelCase_=None , UpperCamelCase_="relu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=1E-5 , UpperCamelCase_=0.02 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=5_0256 , UpperCamelCase_=5_0256 , UpperCamelCase_=False , UpperCamelCase_=False , **UpperCamelCase_ , ):
lowercase_ :Any = state_dim
lowercase_ :List[str] = act_dim
lowercase_ :List[str] = hidden_size
lowercase_ :int = max_ep_len
lowercase_ :List[str] = action_tanh
lowercase_ :Any = vocab_size
lowercase_ :List[Any] = n_positions
lowercase_ :List[str] = n_layer
lowercase_ :Optional[Any] = n_head
lowercase_ :int = n_inner
lowercase_ :List[str] = activation_function
lowercase_ :List[str] = resid_pdrop
lowercase_ :Dict = embd_pdrop
lowercase_ :List[Any] = attn_pdrop
lowercase_ :Union[str, Any] = layer_norm_epsilon
lowercase_ :List[str] = initializer_range
lowercase_ :Any = scale_attn_weights
lowercase_ :Union[str, Any] = use_cache
lowercase_ :Any = scale_attn_by_inverse_layer_idx
lowercase_ :Tuple = reorder_and_upcast_attn
lowercase_ :int = bos_token_id
lowercase_ :List[str] = eos_token_id
super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
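# Usage sketch (added; standard transformers pattern, the model class name is
# assumed since it is not shown in this file):
#   config = DecisionTransformerConfig(state_dim=17, act_dim=4)
#   model = DecisionTransformerModel(config)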
| 252 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
UpperCAmelCase_ : int = logging.get_logger(__name__)
# General docstring
UpperCAmelCase_ : Tuple = 'ResNetConfig'
# Base docstring
UpperCAmelCase_ : List[str] = 'microsoft/resnet-50'
UpperCAmelCase_ : Optional[int] = [1, 2048, 7, 7]
# Image classification docstring
UpperCAmelCase_ : str = 'microsoft/resnet-50'
UpperCAmelCase_ : Tuple = 'tiger cat'
UpperCAmelCase_ : str = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : str = "relu" ) -> int:
super().__init__()
a_ : Optional[Any] = nn.Convad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=kernel_size // 2 , bias=SCREAMING_SNAKE_CASE__ )
a_ : int = nn.BatchNormad(SCREAMING_SNAKE_CASE__ )
a_ : Any = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Tensor ) -> Tensor:
a_ : Tuple = self.convolution(SCREAMING_SNAKE_CASE__ )
a_ : Dict = self.normalization(SCREAMING_SNAKE_CASE__ )
a_ : Dict = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : int , SCREAMING_SNAKE_CASE__ : ResNetConfig ) -> int:
super().__init__()
a_ : str = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
a_ : Dict = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
a_ : Optional[Any] = config.num_channels
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tensor ) -> Tensor:
a_ : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
a_ : Optional[Any] = self.embedder(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = self.pooler(SCREAMING_SNAKE_CASE__ )
return embedding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 2 ) -> Dict:
super().__init__()
a_ : Any = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , stride=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = nn.BatchNormad(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tensor ) -> Tensor:
a_ : Any = self.convolution(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = self.normalization(SCREAMING_SNAKE_CASE__ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : str = "relu" ) -> Optional[Any]:
super().__init__()
a_ : Union[str, Any] = in_channels != out_channels or stride != 1
a_ : Union[str, Any] = (
ResNetShortCut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) if should_apply_shortcut else nn.Identity()
)
a_ : int = nn.Sequential(
ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) , ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , activation=SCREAMING_SNAKE_CASE__ ) , )
a_ : Any = ACTaFN[activation]
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]:
a_ : List[str] = hidden_state
a_ : Optional[Any] = self.layer(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = self.shortcut(SCREAMING_SNAKE_CASE__ )
hidden_state += residual
a_ : int = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : str = "relu" , SCREAMING_SNAKE_CASE__ : int = 4 ) -> Optional[Any]:
super().__init__()
a_ : Tuple = in_channels != out_channels or stride != 1
a_ : Any = out_channels // reduction
a_ : Optional[Any] = (
ResNetShortCut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) if should_apply_shortcut else nn.Identity()
)
a_ : int = nn.Sequential(
ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 ) , ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) , ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE__ ) , )
a_ : Optional[int] = ACTaFN[activation]
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple:
a_ : Any = hidden_state
a_ : Dict = self.layer(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = self.shortcut(SCREAMING_SNAKE_CASE__ )
hidden_state += residual
a_ : Union[str, Any] = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : ResNetConfig , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , ) -> Optional[int]:
super().__init__()
a_ : List[Any] = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
a_ : Union[str, Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , activation=config.hidden_act ) , *[layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tensor ) -> Tensor:
a_ : Any = input
for layer in self.layers:
a_ : Optional[Any] = layer(SCREAMING_SNAKE_CASE__ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : ResNetConfig ) -> str:
super().__init__()
a_ : Tuple = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
SCREAMING_SNAKE_CASE__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
a_ : Union[str, Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(SCREAMING_SNAKE_CASE__ , config.depths[1:] ):
self.stages.append(ResNetStage(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , depth=SCREAMING_SNAKE_CASE__ ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tensor , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = True ) -> BaseModelOutputWithNoAttention:
a_ : Union[str, Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
a_ : str = hidden_states + (hidden_state,)
a_ : List[Any] = stage_module(SCREAMING_SNAKE_CASE__ )
if output_hidden_states:
a_ : Dict = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ , )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : List[Any] = ResNetConfig
snake_case__ : Optional[int] = '''resnet'''
snake_case__ : int = '''pixel_values'''
snake_case__ : str = True
def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(SCREAMING_SNAKE_CASE__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str=False ) -> Any:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
a_ : List[Any] = value
UpperCAmelCase_ : List[str] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UpperCAmelCase_ : Union[str, Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase__ , )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any:
super().__init__(SCREAMING_SNAKE_CASE__ )
a_ : int = config
a_ : Optional[int] = ResNetEmbeddings(SCREAMING_SNAKE_CASE__ )
a_ : Any = ResNetEncoder(SCREAMING_SNAKE_CASE__ )
a_ : Dict = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tensor , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
a_ : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
a_ : List[str] = self.embedder(SCREAMING_SNAKE_CASE__ )
a_ : str = self.encoder(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
a_ : str = encoder_outputs[0]
a_ : List[str] = self.pooler(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , pooler_output=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , lowercase__ , )
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = config.num_labels
a_ : List[str] = ResNetModel(SCREAMING_SNAKE_CASE__ )
# classification head
a_ : Dict = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.LongTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
a_ : str = return_dict if return_dict is not None else self.config.use_return_dict
a_ : Dict = self.resnet(SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
a_ : str = outputs.pooler_output if return_dict else outputs[1]
a_ : Any = self.classifier(SCREAMING_SNAKE_CASE__ )
a_ : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
a_ : Optional[Any] = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
a_ : Optional[Any] = 'single_label_classification'
else:
a_ : Optional[int] = 'multi_label_classification'
if self.config.problem_type == "regression":
a_ : str = MSELoss()
if self.num_labels == 1:
a_ : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
a_ : List[Any] = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.config.problem_type == "single_label_classification":
a_ : Optional[int] = CrossEntropyLoss()
a_ : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
a_ : List[Any] = BCEWithLogitsLoss()
a_ : Optional[Any] = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not return_dict:
a_ : Union[str, Any] = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , lowercase__ , )
class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ ):
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
super().__init__(SCREAMING_SNAKE_CASE__ )
super()._init_backbone(SCREAMING_SNAKE_CASE__ )
a_ : Tuple = [config.embedding_size] + config.hidden_sizes
a_ : Dict = ResNetEmbeddings(SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = ResNetEncoder(SCREAMING_SNAKE_CASE__ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@replace_return_docstrings(output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC )
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tensor , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None ) -> BackboneOutput:
a_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
a_ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ : Dict = self.embedder(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = self.encoder(SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = outputs.hidden_states
a_ : Dict = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
a_ : Optional[int] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=SCREAMING_SNAKE_CASE__ , )
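# Usage sketch (added; standard transformers inference pattern, not part of
# the original file):
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   logits = model(**processor(images=image, return_tensors="pt")).logits
#   label = model.config.id2label[logits.argmax(-1).item()]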
| 32 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
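# Usage sketch (added; standard CLIP processor call, not part of the file):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# The returned BatchEncoding carries both input_ids and pixel_values.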
| 32 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    # Stack the per-example tensors into a single batch for the Trainer.
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"""Overriding config: {model_args.config_overrides}""")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"""New config: {config}""")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowerCamelCase__ : int = image_processor.size['shortest_edge']
else:
lowerCamelCase__ : Optional[int] = (image_processor.size['height'], image_processor.size['width'])
lowerCamelCase__ : str = Compose(
[
Lambda(lambda _UpperCAmelCase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_UpperCAmelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_UpperCAmelCase ):
lowerCamelCase__ : str = [transforms(_UpperCAmelCase ) for image in examples[image_column_name]]
return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
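
# Illustrative launch command (added; the flag values below are examples, not defaults
# beyond those defined in the dataclasses above):
#
#     python run_mae.py \
#         --dataset_name cifar10 \
#         --output_dir ./vit-mae-demo \
#         --do_train --do_eval \
#         --base_learning_rate 1.5e-4 \
#         --mask_ratio 0.75 \
#         --overwrite_output_dir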
| 364 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: return True if `pattern` occurs in `text`."""
    # 1) Construct the failure (prefix) array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    """Build the KMP failure array: failure[j] is the length of the longest proper
    prefix of pattern[: j + 1] that is also a suffix of it."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
_UpperCAmelCase : Union[str, Any] = """abc1abc12"""
_UpperCAmelCase : List[Any] = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
_UpperCAmelCase : Dict = """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
_UpperCAmelCase : Any = """ABABX"""
_UpperCAmelCase : Union[str, Any] = """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
_UpperCAmelCase : int = """AAAB"""
_UpperCAmelCase : str = """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
_UpperCAmelCase : Optional[Any] = """abcdabcy"""
_UpperCAmelCase : List[Any] = """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
_UpperCAmelCase : str = """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
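
    # Added sketch: a small extension of kmp() that returns the start index of the
    # match instead of a bool, reusing get_failure_array above (assumes a non-empty
    # pattern; returns -1 when there is no match).
    def kmp_find(pattern: str, text: str) -> int:
        failure = get_failure_array(pattern)
        i = j = 0
        while i < len(text):
            if pattern[j] == text[i]:
                if j == len(pattern) - 1:
                    return i - j  # start index of the match in text
                j += 1
            elif j > 0:
                j = failure[j - 1]
                continue
            i += 1
        return -1

    assert kmp_find("ABABX", "ABABZABABYABABX") == 10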
| 45 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    """Download the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    """First five logits expected from each original checkpoint on the COCO image."""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
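
    # Example invocation (added for illustration; the script file name and checkpoint
    # path are placeholders):
    #
    #     python convert_swiftformer_original_to_hf.py \
    #         --swiftformer_name swiftformer_xs \
    #         --pytorch_dump_folder_path ./converted_outputs/ \
    #         --original_ckpt /path/to/swiftformer_xs.pth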
| 220 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices)
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def A__ ( self: int ) -> Dict:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def A__ ( self: Optional[Any] ) -> Optional[Any]:
pass
def A__ ( self: Optional[Any] ) -> List[str]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : Optional[Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase_ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) )
def A__ ( self: str ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCAmelCase_ : str = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Any = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase_ )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim]
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim]
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertTrue(outputs.logits.argmax(dim=-1).item(), 281)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 345 | 0 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                _, action_pred, _ = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 194 |
import d4rl  # noqa, registers the offline-RL gym environments on import
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 194 | 1 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 102 |
"""simple docstring"""
from math import factorial, radians
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with a truncated Maclaurin series:
    sin(x) = x - x^3/3! + x^5/5! - x^7/7! + ...
    """
    # Wrap the angle into [0, 360) degrees so the series converges quickly
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians  # the first series term is x itself
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__("""doctest""").testmod()
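
    # Added sanity sketch (illustrative): with the default 18 extra terms the truncated
    # series agrees with math.sin after rounding to 10 decimal places.
    from math import sin

    assert maclaurin_sin(30) == round(sin(radians(30)), 10)
    assert maclaurin_sin(90) == round(sin(radians(90)), 10)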
| 102 | 1 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    """A grid cell with A* bookkeeping: g (path) cost, h (heuristic) cost and a parent link."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start, goal) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each searcher now aims at the other's current frontier node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
lowerCamelCase : str = (0, 0)
lowerCamelCase : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCamelCase : int = time.time()
lowerCamelCase : Optional[int] = AStar(init, goal)
lowerCamelCase : str = a_star.search()
lowerCamelCase : List[str] = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
lowerCamelCase : Any = time.time()
lowerCamelCase : int = BidirectionalAStar(init, goal)
lowerCamelCase : Any = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 176 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                """The config can simply be saved and uploaded again to be fixed.""" )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for BART (seq2seq with optional past key values)."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
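    # Illustrative note, not in the original source: these name -> {axis: label}
    # mappings are what the export pipeline is expected to pass to
    # `torch.onnx.export` as `dynamic_axes`, keeping "batch" and the sequence
    # axes symbolic in the exported graph.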
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
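    # Illustrative note, not in the original source: each per-layer entry above is
    # a 4-tuple (decoder key, decoder value, encoder key, encoder value), i.e. the
    # self-attention cache followed by the cross-attention cache of one decoder layer.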
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
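    # Illustrative note, not in the original source: with the default BartConfig
    # (16 heads, d_model=1024) the head dimension is 1024 // 16 = 64, so
    # `past_shape` above is (batch, 16, seqlen + 2, 64).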
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
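    # Illustrative note, not in the original source: with fully dynamic axes
    # (batch_size = seq_length = -1) the effective dummy batch is 2 and the
    # effective sequence length 8 once special tokens are added back, per the
    # fixed dimensions referenced above.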
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
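# Illustrative usage sketch, not part of the original module; the checkpoint name
# and task are examples only:
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
#     config = BartConfig()                       # defaults resemble facebook/bart-large
#     config.num_attention_heads                  # 16, aliased via attribute_map
#     onnx_config = BartOnnxConfig(config, task="seq2seq-lm")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     # dummy keys: input_ids, attention_mask, decoder_input_ids, decoder_attention_mask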
| 176 | 1 |