import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Apply the Exponential Linear Unit (ELU): x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
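# Illustrative usage (a sketch, not part of the original module): positive inputs
# pass through unchanged while negative inputs saturate toward -alpha, e.g.
#     exponential_linear_unit(np.array([1.0, -1.0]), alpha=0.5)
#     # -> array([ 1.        , -0.31606028]), since 0.5 * (exp(-1) - 1) ≈ -0.3161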
import logging
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments


logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "Whether to use Adafactor."})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
def infix_2_postfix(infix: str) -> str:
    """Convert an infix expression to postfix notation, printing each step."""
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if len(infix) > 7 else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is alphabet or digit, add it to postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # pop stack & add the content to postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # if stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return postfix as str


def infix_2_prefix(infix: str) -> str:
    """Convert an infix expression to prefix notation via reverse + postfix."""
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    # call infix_2_postfix on the reversed infix, return reverse of its postfix
    return (infix_2_postfix("".join(infix)))[::-1]


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
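# Worked example (illustrative): for the input "a+b*(c^d-e)" the algorithm
# reverses it to "(e-d^c)*b+a", converts that to the postfix form "edc^-b*a+",
# and reverses once more, so infix_2_prefix returns the prefix form "+a*b-^cde".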
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the URL of a file stored in a dataset repository on the Hugging Face Hub."""
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
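# Illustrative call (a sketch; the repo id is hypothetical):
#     hf_hub_url("user/my_dataset", "data/train.csv", revision="main")
# resolves to a URL of the form
#     https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv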
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
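# Rough intuition, inferred from the cases above rather than from a documented
# contract: is_safetensors_compatible returns True only when every PyTorch
# ".bin" weight file has a matching ".safetensors" counterpart, taking an
# optional variant suffix such as "fp16" into account.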
from __future__ import annotations


class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left = None
        self.right = None


def display(tree):  # In-order traversal of the tree
    """Print node values with an in-order traversal."""
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree):
    """Return the depth of the tree (0 for an empty tree)."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree):
    """Return True if every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main():  # Main function for testing.
    # Build a representative 9-node sample tree for the checks below.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
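# Quick check of the definitions (illustrative): a root with exactly two leaf
# children is full, while a root with a single child is not:
#     root = Node(1); root.left = Node(2); root.right = Node(3)
#     is_full_binary_tree(root)  # True; set root.right = None and it turns False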
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}


class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        """Instantiate an OwlViTConfig from separate text and vision configuration dicts."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
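# Illustrative usage (a sketch): compose a full OwlViT config from explicit
# sub-configurations and round-trip it through a plain dict.
#     text_config = OwlViTTextConfig(vocab_size=49408)
#     vision_config = OwlViTVisionConfig(patch_size=32)
#     config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
#     config.to_dict()["model_type"]  # -> "owlvit"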
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]

        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
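# Quick illustration of get_pairs (a sketch): for the symbol tuple of "hello"
# with the end-of-word marker appended,
#     get_pairs(("h", "e", "l", "l", "o</w>"))
#     # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}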
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. "
            "For `sigmoied_threshold`, it is the threshold \\tau against which the (sigmoied) scores are compared. "
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder to save the pruned model to (defaults to a `bertarized_` sibling folder)",
    )

    args = parser.parse_args()

    main(args)
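# Example invocation (a sketch; the script and model paths are hypothetical):
#     python bertarize.py --pruning_method topK --threshold 0.10 \
#         --model_name_or_path serialization_dir/fine_pruned_model
# With topK, each pruned layer keeps the top 10% of weights ranked by its
# learned mask scores; the rest are zeroed out before saving.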
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    """Count the ways to make n pence from standard UK coins (Project Euler 31)."""
    return two_pound(n)


if __name__ == "__main__":
    print(solution(int(input().strip())))
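# How the recursion counts combinations (worked mini-example): each function
# counts the ways to make x pence using coins up to its own denomination,
# either using its coin once more or delegating to the next smaller coin.
# For 5p there are 4 ways (5, 2+2+1, 2+1+1+1, 1+1+1+1+1), and indeed
# five_pence(5) = five_pence(0) + two_pence(5) = 1 + 3 = 4.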
import mpmath  # for roots of unity
import numpy as np


class FFT:
    """Fast polynomial multiplication via the discrete Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
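# Illustrative multiplication (a sketch): (1 + 2x + 3x^2) * (1 + x) expands to
# 1 + 3x + 5x^2 + 3x^3, so up to floating-point rounding the real parts of
#     FFT([1, 2, 3], [1, 1]).product
# are [1, 3, 5, 3].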
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )

    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
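# Example invocation (a sketch; the script and file names are hypothetical):
#     python convert_efficientformer_checkpoint.py \
#         --pytorch_model_path efficientformer_l1_300d.pth \
#         --config_file l1_config.json \
#         --pytorch_dump_path efficientformer-l1 \
#         --no-push_to_hub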
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Any:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
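# Illustrative round-trip (a sketch): SentencePiece decoding undoes _tokenize,
# so for a loaded tokenizer `tok`,
#     tok.convert_tokens_to_string(tok._tokenize("Hello world"))
# should return "Hello world" (modulo whitespace normalization).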
from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate the sine of an angle in degrees with a truncated Maclaurin series."""
    # Simplify the angle to be between 360 and -360 degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
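# Sanity check (illustrative): sin(30.0) returns 0.5 to the default ten rounded
# digits, matching the exact value of sin(30 degrees).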
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizerIntegrationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
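# The alignment rule exercised above, in short: with both arguments None the
# helper defaults to the last stage; when only one of out_features/out_indices
# is given, the other is derived from stage_names so the two always agree.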
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary representation as a "0b"-prefixed string."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
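# Illustrative checks: decimal_to_binary(5) -> "0b101",
# decimal_to_binary(-2) -> "-0b10", decimal_to_binary(0) -> "0b0".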
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n (Project Euler problem 1)."""
    a = 3
    result = 0
    while a < n:
        # Multiples of 15 satisfy both conditions but are only added once here,
        # so no separate correction term is needed.
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
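# Cross-check (illustrative): for n = 10 the qualifying numbers are 3, 5, 6, 9,
# so solution(10) == 23, matching the classic statement of the problem.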
| 470 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCAmelCase_ (lowerCamelCase_ ):
"""simple docstring"""
UpperCamelCase_ : str = ["""image_processor""", """tokenizer"""]
UpperCamelCase_ : List[str] = """OwlViTImageProcessor"""
UpperCamelCase_ : str = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : Any , a_ : Any=None , a_ : str=None , **a_ : List[str] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , a_ , )
UpperCAmelCase_ : List[Any] = kwargs.pop("""feature_extractor""" )
UpperCAmelCase_ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(a_ , a_ )
def __call__( self : Any , a_ : Optional[int]=None , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="max_length" , a_ : List[Any]="np" , **a_ : int )-> Tuple:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(a_ , a_ ) or (isinstance(a_ , a_ ) and not isinstance(text[0] , a_ )):
UpperCAmelCase_ : List[str] = [self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )]
elif isinstance(a_ , a_ ) and isinstance(text[0] , a_ ):
UpperCAmelCase_ : Optional[int] = []
# Maximum number of queries across batch
UpperCAmelCase_ : Union[str, Any] = max([len(a_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(a_ ) != max_num_queries:
UpperCAmelCase_ : str = t + [""" """] * (max_num_queries - len(a_ ))
UpperCAmelCase_ : Optional[int] = self.tokenizer(a_ , padding=a_ , return_tensors=a_ , **a_ )
encodings.append(a_ )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
                input_ids = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
            encoding = BatchEncoding()
            encoding["""input_ids"""] = input_ids
            encoding["""attention_mask"""] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["""query_pixel_values"""] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process(*args , **kwargs )

    def post_process_object_detection( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process_object_detection(*args , **kwargs )

    def post_process_image_guided_detection( self , *args , **kwargs ):
        """simple docstring"""
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )

    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
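# A standalone sketch (hedged, names are illustrative) of the query-padding
# step in __call__ above: nested text queries are padded with " " so every
# sample in the batch has the same number of queries before tokenization.
def _pad_text_queries_sketch(text):
    max_num_queries = max(len(t) for t in text)
    return [t + [" "] * (max_num_queries - len(t)) for t in text]

if __name__ == "__main__":
    queries = [["a cat"], ["a dog", "a remote control"]]
    assert _pad_text_queries_sketch(queries) == [["a cat", " "], ["a dog", "a remote control"]]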
| 470 | 1 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
__A : Any = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 450 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including ``num`` via the Sieve of Eratosthenes."""
    if num <= 0:
        message = f"""{num}: Invalid input, please enter a positive integer."""
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
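# Sanity check (a hedged aside): the inner marking loop can start at
# start * start because any smaller multiple start * k (k < start) was already
# crossed out by a previous prime.
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]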
| 450 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
lowerCamelCase_ : List[Any] = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model , model_file ):
try:
with open(lowerCamelCase , """rb""" ) as flax_state_f:
UpperCamelCase_: Tuple = from_bytes(lowerCamelCase , flax_state_f.read() )
except UnpicklingError as e:
try:
            with open(model_file ) as f:
if f.read().startswith("""version""" ):
raise OSError(
"""You seem to have cloned a repository without having git-lfs installed. Please"""
""" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
""" folder you cloned.""" )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'''Unable to convert {model_file} to Flax deserializable object. ''' )
    return load_flax_weights_in_pytorch_model(pt_model , flax_state )
def load_flax_weights_in_pytorch_model(pt_model , flax_state ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
    is_type_bfloat16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bfloat16 ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
UpperCamelCase_: List[str] = """"""
    flax_state_dict = flatten_dict(flax_state , sep=""".""" )
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(""".""" )

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(lowerCamelCase ):
                flax_key_tuple_array[i] = (
flax_key_tuple_string.replace("""_0""" , """.0""" )
.replace("""_1""" , """.1""" )
.replace("""_2""" , """.2""" )
.replace("""_3""" , """.3""" )
.replace("""_4""" , """.4""" )
.replace("""_5""" , """.5""" )
.replace("""_6""" , """.6""" )
.replace("""_7""" , """.7""" )
.replace("""_8""" , """.8""" )
.replace("""_9""" , """.9""" )
)
UpperCamelCase_: Tuple = """.""".join(lowerCamelCase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )

    pt_model.load_state_dict(pt_model_dict )

    # re-transform missing_keys to list
    missing_keys = list(missing_keys )

    if len(unexpected_keys ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
    if len(missing_keys ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
""" use it for predictions and inference.""" )
return pt_model
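# Layout note, shown as a numpy-only sketch (assumption: Flax conv kernels are
# laid out (H, W, in, out) while PyTorch expects (out, in, H, W), hence the
# (3, 2, 0, 1) transpose above; dense kernels are plain-transposed).
if __name__ == "__main__":
    flax_conv_kernel = np.zeros((3, 3, 16, 32))  # (H, W, in, out)
    assert np.transpose(flax_conv_kernel, (3, 2, 0, 1)).shape == (32, 16, 3, 3)  # (out, in, H, W)
    flax_dense_kernel = np.zeros((768, 3072))  # (in, out)
    assert flax_dense_kernel.T.shape == (3072, 768)  # (out, in)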
| 548 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest ):
    '''simple docstring'''

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        config = {
            """num_train_timesteps""": 201,
            """sigma_min""": 0.002,
            """sigma_max""": 80.0,
        }

        config.update(**kwargs )
        return config
    def test_step_shape( self ):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config )

        scheduler.set_timesteps(num_inference_steps )

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample

        self.assertEqual(output_0.shape , sample.shape )
        self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
    def test_clip_denoised( self ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=snake_case_ )
    def test_full_loop_no_noise_onestep( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps )
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0 )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps ):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )

            # 2. predict noise residual
            residual = model(scaled_sample , t )

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 192.7614 ) < 1e-2
        assert abs(result_mean.item() - 0.2510 ) < 1e-3
    def test_full_loop_no_noise_multistep( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0 )

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )

            # 2. predict noise residual
            residual = model(scaled_sample , t )

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 347.6357 ) < 1e-2
        assert abs(result_mean.item() - 0.4527 ) < 1e-3
    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError , msg="""`timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps )

        with self.assertRaises(ValueError , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError ,
            msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
            scheduler.set_timesteps(timesteps=timesteps )
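# The denoising pattern exercised by the full-loop tests above, as a hedged
# comment-only sketch (variable names are illustrative):
#
#     for t in scheduler.timesteps:
#         scaled = scheduler.scale_model_input(sample, t)
#         residual = model(scaled, t)
#         sample = scheduler.step(residual, t, sample, generator=generator).prev_sample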
| 548 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_efficientformer"""] = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_efficientformer"""] = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
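# A hedged note on the lazy-import pattern above: at import time only the
# `_import_structure` dict is built; `_LazyModule` imports each submodule on
# first attribute access, so optional backends (vision, torch, tf) are only
# loaded when the corresponding classes are actually requested.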
| 709 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self ,
        parent ,
        batch_size=14 ,
        seq_length=7 ,
        is_training=True ,
        use_token_type_ids=True ,
        use_input_mask=True ,
        use_labels=True ,
        use_mc_token_ids=True ,
        vocab_size=99 ,
        hidden_size=32 ,
        num_hidden_layers=5 ,
        num_attention_heads=4 ,
        intermediate_size=37 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=16 ,
        type_sequence_label_size=2 ,
        initializer_range=0.02 ,
        num_labels=3 ,
        num_choices=4 ,
        scope=None ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config( self ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()

        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def create_and_check_lm_head_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}

        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp( self ):
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
    def tearDown( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_ctrl_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )

    def test_ctrl_lm_head_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _snake_case ( self ) -> Optional[int]:
pass
@slow
    def test_model_from_pretrained( self ):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def _snake_case ( self ) -> Optional[int]:
pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl( self ):
        model = CTRLLMHeadModel.from_pretrained("""ctrl""" )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[11_859, 0, 1_611, 8]] , dtype=torch.long , device=torch_device )  # Legal the president is
        expected_output_ids = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 431 | 0 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image , w , h ):
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]

    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def slerp(t , v0 , v1 , DOT_THRESHOLD=0.9995 ):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False
    if not isinstance(v0 , np.ndarray ):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0 ) * np.linalg.norm(v1 )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot )
        sin_theta_0 = np.sin(theta_0 )
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t )
        s0 = np.sin(theta_0 - theta_t ) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2 ).to(input_device )

    return v2
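# Usage note (hedged): slerp returns v0 at t=0 and v1 at t=1; nearly parallel
# inputs (|dot| > DOT_THRESHOLD) fall back to plain lerp so we never divide by
# sin(theta_0) ~ 0. For example:
#
#     mid = slerp(0.5, torch.randn(4), torch.randn(4))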
def spherical_dist_loss(x , y ):
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
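# Quick numeric check (hedged aside): for unit vectors at angle theta,
# |x - y| = 2 sin(theta / 2), so the loss equals theta**2 / 2 and orthogonal
# vectors give pi**2 / 8:
#
#     x, y = torch.tensor([[1.0, 0.0]]), torch.tensor([[0.0, 1.0]])
#     assert abs(spherical_dist_loss(x, y).item() - math.pi ** 2 / 8) < 1e-4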
def set_requires_grad(model , value ):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedStableDiffusion(DiffusionPipeline ):
    '''simple docstring'''

    def __init__(
        self ,
        vae ,
        text_encoder ,
        clip_model ,
        tokenizer ,
        unet ,
        scheduler ,
        feature_extractor ,
        coca_model=None ,
        coca_tokenizer=None ,
        coca_transform=None ,
    ):
        super().__init__()
        self.register_modules(
            vae=vae ,text_encoder=text_encoder ,clip_model=clip_model ,tokenizer=tokenizer ,unet=unet ,scheduler=scheduler ,feature_extractor=feature_extractor ,coca_model=coca_model ,coca_tokenizer=coca_tokenizer ,coca_transform=coca_transform ,)
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size , int )
            else feature_extractor.size["""shortest_edge"""]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean ,std=feature_extractor.image_std )
        set_requires_grad(self.text_encoder ,False )
        set_requires_grad(self.clip_model ,False )
def UpperCamelCase_ ( self ,_lowerCAmelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def UpperCamelCase_ ( self ):
self.enable_attention_slicing(_lowerCAmelCase )
def UpperCamelCase_ ( self ):
set_requires_grad(self.vae ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
set_requires_grad(self.vae ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
set_requires_grad(self.unet ,_lowerCAmelCase )
def UpperCamelCase_ ( self ):
set_requires_grad(self.unet ,_lowerCAmelCase )
    def get_timesteps( self ,num_inference_steps ,strength ,device ):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ) ,num_inference_steps )

        t_start = max(num_inference_steps - init_timestep ,0 )
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents( self ,image ,timestep ,batch_size ,dtype ,device ,generator=None ):
if not isinstance(_lowerCAmelCase ,torch.Tensor ):
raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(_lowerCAmelCase )}''' )
lowerCamelCase__ = image.to(device=_lowerCAmelCase ,dtype=_lowerCAmelCase )
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowerCAmelCase )
]
lowerCamelCase__ = torch.cat(_lowerCAmelCase ,dim=0 )
else:
lowerCamelCase__ = self.vae.encode(_lowerCAmelCase ).latent_dist.sample(_lowerCAmelCase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase__ = 0.1_8215 * init_latents
lowerCamelCase__ = init_latents.repeat_interleave(_lowerCAmelCase ,dim=0 )
lowerCamelCase__ = randn_tensor(init_latents.shape ,generator=_lowerCAmelCase ,device=_lowerCAmelCase ,dtype=_lowerCAmelCase )
# get latents
lowerCamelCase__ = self.scheduler.add_noise(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = init_latents
return latents
    def get_image_description( self ,image ):
lowerCamelCase__ = self.coca_transform(_lowerCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowerCamelCase__ = self.coca_model.generate(transformed_image.to(device=self.device ,dtype=self.coca_model.dtype ) )
lowerCamelCase__ = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" ,"""""" ).rstrip(""" .,""" )
    def get_clip_image_embeddings( self ,image ,batch_size ):
lowerCamelCase__ = self.feature_extractor.preprocess(_lowerCAmelCase )
lowerCamelCase__ = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
lowerCamelCase__ = self.clip_model.get_image_features(_lowerCAmelCase )
lowerCamelCase__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=_lowerCAmelCase )
lowerCamelCase__ = image_embeddings_clip.repeat_interleave(_lowerCAmelCase ,dim=0 )
return image_embeddings_clip
@torch.enable_grad()
    def cond_fn( self ,latents ,timestep ,index ,text_embeddings ,noise_pred_original ,original_image_embeddings_clip ,clip_guidance_scale ,):
lowerCamelCase__ = latents.detach().requires_grad_()
lowerCamelCase__ = self.scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase )
# predict the noise residual
lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ).sample
if isinstance(self.scheduler ,(PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowerCamelCase__ = self.scheduler.alphas_cumprod[timestep]
lowerCamelCase__ = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase__ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowerCamelCase__ = torch.sqrt(_lowerCAmelCase )
lowerCamelCase__ = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler ,_lowerCAmelCase ):
lowerCamelCase__ = self.scheduler.sigmas[index]
lowerCamelCase__ = latents - sigma * noise_pred
else:
raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase__ = 1 / 0.1_8215 * sample
lowerCamelCase__ = self.vae.decode(_lowerCAmelCase ).sample
lowerCamelCase__ = (image / 2 + 0.5).clamp(0 ,1 )
lowerCamelCase__ = transforms.Resize(self.feature_extractor_size )(_lowerCAmelCase )
lowerCamelCase__ = self.normalize(_lowerCAmelCase ).to(latents.dtype )
lowerCamelCase__ = self.clip_model.get_image_features(_lowerCAmelCase )
lowerCamelCase__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 ,dim=-1 ,keepdim=_lowerCAmelCase )
lowerCamelCase__ = spherical_dist_loss(_lowerCAmelCase ,_lowerCAmelCase ).mean() * clip_guidance_scale
lowerCamelCase__ = -torch.autograd.grad(_lowerCAmelCase ,_lowerCAmelCase )[0]
if isinstance(self.scheduler ,_lowerCAmelCase ):
lowerCamelCase__ = latents.detach() + grads * (sigma**2)
lowerCamelCase__ = noise_pred_original
else:
lowerCamelCase__ = noise_pred_original - torch.sqrt(_lowerCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
    def __call__( self ,style_image ,content_image ,style_prompt = None ,content_prompt = None ,height = 5_12 ,width = 5_12 ,noise_strength = 0.6 ,num_inference_steps = 50 ,guidance_scale = 7.5 ,batch_size = 1 ,eta = 0.0 ,clip_guidance_scale = 1_00 ,generator = None ,output_type = "pil" ,return_dict = True ,slerp_latent_style_strength = 0.8 ,slerp_prompt_style_strength = 0.1 ,slerp_clip_image_style_strength = 0.1 ,):
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and len(_lowerCAmelCase ) != batch_size:
raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(_lowerCAmelCase )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(_lowerCAmelCase ,torch.Generator ) and batch_size > 1:
lowerCamelCase__ = [generator] + [None] * (batch_size - 1)
lowerCamelCase__ = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
lowerCamelCase__ = [x[0] for x in coca_is_none if x[1]]
lowerCamelCase__ = """, """.join(_lowerCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
lowerCamelCase__ = self.get_image_description(_lowerCAmelCase )
if style_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
lowerCamelCase__ = self.get_image_description(_lowerCAmelCase )
# get prompt text embeddings for content and style
lowerCamelCase__ = self.tokenizer(
_lowerCAmelCase ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=_lowerCAmelCase ,return_tensors="""pt""" ,)
lowerCamelCase__ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase__ = self.tokenizer(
_lowerCAmelCase ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,truncation=_lowerCAmelCase ,return_tensors="""pt""" ,)
lowerCamelCase__ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowerCamelCase__ = slerp(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# duplicate text embeddings for each generation per prompt
lowerCamelCase__ = text_embeddings.repeat_interleave(_lowerCAmelCase ,dim=0 )
# set timesteps
lowerCamelCase__ = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowerCamelCase__ = {}
if accepts_offset:
lowerCamelCase__ = 1
self.scheduler.set_timesteps(_lowerCAmelCase ,**_lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowerCamelCase__ , lowerCamelCase__ = self.get_timesteps(_lowerCAmelCase ,_lowerCAmelCase ,self.device )
lowerCamelCase__ = timesteps[:1].repeat(_lowerCAmelCase )
# Preprocess image
lowerCamelCase__ = preprocess(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = self.prepare_latents(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,text_embeddings.dtype ,self.device ,_lowerCAmelCase )
lowerCamelCase__ = preprocess(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = self.prepare_latents(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,text_embeddings.dtype ,self.device ,_lowerCAmelCase )
lowerCamelCase__ = slerp(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
if clip_guidance_scale > 0:
lowerCamelCase__ = self.get_clip_image_embeddings(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = self.get_clip_image_embeddings(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = slerp(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase__ = content_text_input.input_ids.shape[-1]
lowerCamelCase__ = self.tokenizer([""""""] ,padding="""max_length""" ,max_length=_lowerCAmelCase ,return_tensors="""pt""" )
lowerCamelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowerCamelCase__ = uncond_embeddings.repeat_interleave(_lowerCAmelCase ,dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase__ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowerCamelCase__ = torch.randn(_lowerCAmelCase ,generator=_lowerCAmelCase ,device="""cpu""" ,dtype=_lowerCAmelCase ).to(
self.device )
else:
lowerCamelCase__ = torch.randn(_lowerCAmelCase ,generator=_lowerCAmelCase ,device=self.device ,dtype=_lowerCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowerCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase__ = {}
if accepts_eta:
lowerCamelCase__ = eta
# check if the scheduler accepts generator
lowerCamelCase__ = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowerCamelCase__ = generator
with self.progress_bar(total=_lowerCAmelCase ):
for i, t in enumerate(_lowerCAmelCase ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ = self.scheduler.scale_model_input(_lowerCAmelCase ,_lowerCAmelCase )
# predict the noise residual
lowerCamelCase__ = self.unet(_lowerCAmelCase ,_lowerCAmelCase ,encoder_hidden_states=_lowerCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ = noise_pred.chunk(2 )
lowerCamelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowerCamelCase__ = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowerCamelCase__ , lowerCamelCase__ = self.cond_fn(
_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase__ = self.scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase__ = 1 / 0.1_8215 * latents
lowerCamelCase__ = self.vae.decode(_lowerCAmelCase ).sample
lowerCamelCase__ = (image / 2 + 0.5).clamp(0 ,1 )
lowerCamelCase__ = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowerCamelCase__ = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_lowerCAmelCase ,nsfw_content_detected=_lowerCAmelCase )
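# Classifier-free guidance, as combined in __call__ above, isolated as a
# hedged sketch: eps = eps_uncond + w * (eps_text - eps_uncond), which reduces
# to eps_text at w = 1 and extrapolates beyond it for w > 1.
if __name__ == "__main__":
    def cfg_combine(noise_pred_uncond, noise_pred_text, guidance_scale):
        return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

    uncond, text = torch.zeros(2), torch.ones(2)
    assert torch.equal(cfg_combine(uncond, text, 1.0), text)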
| 50 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        raise TypeError("""Input value must be an 'int' type""" )

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
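# Hedged cross-check (using the name restored above): for non-negative
# integers, the position of the highest set bit equals int.bit_length().
assert get_highest_set_bit_position(25) == (25).bit_length() == 5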
| 28 | 0 |
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_A: Any = logging.get_logger(__name__)
class ByTaTokenizer(PreTrainedTokenizer ):
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__( self , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=125 , additional_special_tokens=None , **kwargs , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x : bool('extra_id' in str(x ) ) , additional_special_tokens ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
' extra_ids tokens' )
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token

        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
# define special tokens dict
__UpperCAmelCase = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
        self._num_special_tokens = len(self.special_tokens_encoder )
        n = len(additional_special_tokens )
        for i, token in enumerate(additional_special_tokens ):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
@property
    def vocab_size( self ):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def _add_eos_if_not_present( self , token_ids ):
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                ' eos tokens being added.' )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
    def _tokenize( self , text ):
        tokens = [chr(i ) for i in text.encode('utf-8' )]
        return tokens
    def _convert_token_to_id( self , token ):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token ) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token ) + self._num_special_tokens
        return token_id
    def _convert_id_to_token( self , index ):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens )
        return token
    def convert_tokens_to_string( self , tokens ):
        bstring = b''
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode('utf-8' )
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode('utf-8' )
            elif token in self.special_tokens_encoder:
                tok_string = token.encode('utf-8' )
            elif token in self.added_tokens_encoder:
                tok_string = token.encode('utf-8' )
            else:
                tok_string = bytes([ord(token )] )
            bstring += tok_string
        string = bstring.decode('utf-8' , errors='ignore' )
        return string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        return ()
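# Byte-level mapping sketch (hedged): each UTF-8 byte becomes one id, offset
# by the number of special tokens (3 here: pad, eos, unk), so "hi" maps to
# [ord("h") + 3, ord("i") + 3].
if __name__ == "__main__":
    assert [b + 3 for b in "hi".encode("utf-8")] == [107, 108]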
| 711 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration( electron_conc , hole_conc , intrinsic_conc , ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
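# Worked example (hedged, using the name restored above): the mass-action law
# n * p = n_i**2 gives hole_conc = 5**2 / 25 = 1.0 when electron_conc = 25 and
# intrinsic_conc = 5.
assert carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=5) == ("hole_conc", 1.0)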
| 617 | 0 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = 'hf-internal-testing/tiny-random-bert'
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
FULL_COMMIT_HASH = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class GetFromCacheTests(unittest.TestCase ):
    def test_cached_file( self ):
lowerCamelCase__ : Optional[int] = cached_file(A , A )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(A ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(A , A ) ) )
with open(os.path.join(A , '''refs''' , '''main''' ) ) as f:
lowerCamelCase__ : Optional[int] = f.read()
self.assertEqual(A , os.path.join(A , '''snapshots''' , A , A ) )
self.assertTrue(os.path.isfile(A ) )
# File is cached at the same place the second time.
lowerCamelCase__ : str = cached_file(A , A )
self.assertEqual(A , A )
# Using a specific revision to test the full commit hash.
lowerCamelCase__ : Tuple = cached_file(A , A , revision='''9b8c223''' )
self.assertEqual(A , os.path.join(A , '''snapshots''' , A , A ) )
    def test_cached_file_errors( self ):
with self.assertRaisesRegex(A , '''is not a valid model identifier''' ):
lowerCamelCase__ : Tuple = cached_file('''tiny-random-bert''' , A )
with self.assertRaisesRegex(A , '''is not a valid git identifier''' ):
lowerCamelCase__ : List[Any] = cached_file(A , A , revision='''aaaa''' )
with self.assertRaisesRegex(A , '''does not appear to have a file named''' ):
lowerCamelCase__ : Union[str, Any] = cached_file(A , '''conf''' )
    def test_non_existence_is_cached( self ):
with self.assertRaisesRegex(A , '''does not appear to have a file named''' ):
lowerCamelCase__ : Tuple = cached_file(A , '''conf''' )
with open(os.path.join(A , '''refs''' , '''main''' ) ) as f:
lowerCamelCase__ : Dict = f.read()
self.assertTrue(os.path.isfile(os.path.join(A , '''.no_exist''' , A , '''conf''' ) ) )
lowerCamelCase__ : Dict = cached_file(A , '''conf''' , _raise_exceptions_for_missing_entries=A )
self.assertIsNone(A )
lowerCamelCase__ : str = cached_file(A , '''conf''' , local_files_only=A , _raise_exceptions_for_missing_entries=A )
self.assertIsNone(A )
lowerCamelCase__ : Union[str, Any] = mock.Mock()
lowerCamelCase__ : List[str] = 5_0_0
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : int = HTTPError
lowerCamelCase__ : Dict = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=A ) as mock_head:
lowerCamelCase__ : Optional[int] = cached_file(A , '''conf''' , _raise_exceptions_for_connection_errors=A )
self.assertIsNone(A )
# This check we did call the fake head request
mock_head.assert_called()
    def test_has_file( self ):
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , WEIGHTS_NAME ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , FLAX_WEIGHTS_NAME ) )
    def test_get_file_from_repo_distant( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(A , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , CONFIG_NAME )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(A , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , CONFIG_NAME , revision='''ahaha''' )
        resolved_file = get_file_from_repo('''bert-base-cased''' , CONFIG_NAME )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 7_6_8 )
    def test_get_file_from_repo_local( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : Dict = Path(A ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(A , '''a.txt''' ) , str(A ) )
self.assertIsNone(get_file_from_repo(A , '''b.txt''' ) )
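# The hub cache layout these tests walk through, sketched here as a hedged
# aside:
#
#     models--<org>--<name>/
#         blobs/                content-addressed file storage
#         refs/main             text file holding the current commit hash
#         snapshots/<commit>/   per-revision view that links into blobs/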
| 315 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )

        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
    def test_sequence_builders( self ):
        tokenizer = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )

        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
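# A tiny BPE walk-through matching the toy vocab above (hedged sketch; the
# real tokenizer also appends the </w> end-of-word marker before merging,
# which is why the expected tokens end with "er</w>"):
if __name__ == "__main__":
    merges = [("l", "o"), ("lo", "w")]
    tokens = list("lower")
    for a, b in merges:
        merged, i = [], 0
        while i < len(tokens):
            if i + 1 < len(tokens) and (tokens[i], tokens[i + 1]) == (a, b):
                merged.append(a + b)
                i += 2
            else:
                merged.append(tokens[i])
                i += 1
        tokens = merged
    assert tokens == ["low", "e", "r"]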
| 315 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor (as nested Python lists) of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
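
# Illustrative usage: floats_list((2, 3)) -> a 2x3 nested list of random floats in [0.0, scale).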
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 294 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
snake_case = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], 
[4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
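        # Alias the bounding-box list as well.
        expected_boxes = snake_case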
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 294 | 1 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 294 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# make the repo-level `utils` scripts importable so we can import `check_copies`
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
SCREAMING_SNAKE_CASE : List[str] = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        _, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 294 | 1 |
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))
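
# A few illustrative checks (made-up numbers, not real subscribers):
#   is_sri_lankan_phone_number("+94773283048")  -> True
#   is_sri_lankan_phone_number("0718382399")    -> True
#   is_sri_lankan_phone_number("0094112343221") -> False (no leading 7 after the country prefix)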
if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 716 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 37 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
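
# Note: `from_gh` used above is a module-level flag assigned in the __main__ block below,
# after the CLI arguments are parsed.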
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 105 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
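
# Note: each checkpoint list is ordered VQ-VAE first, followed by the three prior levels
# (prior_level_0 .. prior_level_2); the conversion loop below relies on this ordering.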
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
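
# Illustrative example (hypothetical checkpoint key):
#   replace_key("bottleneck.level_blocks.0.k") -> "bottleneck.level_blocks.0.codebook"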
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
UpperCamelCase__ : str = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
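# Example invocation (output directory and script filename below are illustrative,
# not fixed by this file; the original checkpoints are downloaded on first run):
#
#   python convert_jukebox.py \
#       --model_name jukebox-5b-lyrics \
#       --pytorch_dump_folder_path jukebox-5b-lyrics-converted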
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
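# What the toy fixture above encodes, traced by hand: "apte" has no vocab entry,
# but the merges "a p" and "t e</w>" yield "ap@@" + "te", so with the tokenizer
# built in test_full_blenderbot_small_tokenizer:
#
#   tokenizer.tokenize("adapt act apte")  # -> ["adapt", "act", "ap@@", "te"]
#   tokenizer.convert_tokens_to_ids(["__start__", "adapt", "act", "ap@@", "te", "__end__"])
#   # -> [0, 1, 2, 3, 4, 5]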
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
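# Shape sketch of the qkv split performed above: a fused (3*dim, dim) projection
# is cut into equal query/key/value thirds along dim 0 (toy dim = 4 here):
#
#   qkv = torch.arange(48.0).reshape(12, 4)
#   q, k, v = qkv[:4, :], qkv[4:8, :], qkv[-4:, :]
#   assert q.shape == k.shape == v.shape == (4, 4)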
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")

    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f'MIT/{model_name}' )
feature_extractor.push_to_hub(f'MIT/{model_name}' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowercase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
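# Example invocation (the script filename and dump path are illustrative;
# --push_to_hub additionally requires Hugging Face credentials):
#
#   python convert_audio_spectrogram_transformer_checkpoint.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted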
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
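# debug_launcher, used above, forks N CPU processes so distributed code paths
# can be exercised without GPUs; a minimal sketch (the function name is made up):
#
#   from accelerate import debug_launcher
#
#   def training_function():
#       ...  # anything that constructs an Accelerator() inside
#
#   debug_launcher(training_function, num_processes=2)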
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
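# A standalone sketch of the denoising loop the tests above exercise; the random
# tensor below stands in for a real UNet's noise prediction, so the result is
# meaningless but the scheduler API usage is the point.
if __name__ == "__main__":
    import torch
    from diffusers import DEISMultistepScheduler

    scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.randn_like(sample)  # placeholder for model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)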
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
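# The scan above is the classic O(len(s) * len(pattern)) baseline; overlapping
# occurrences are all reported:
#
#   naive_pattern_search("AAAA", "AA")  # -> [0, 1, 2]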
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
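# What the _LazyModule registration above buys, sketched (the import path mirrors
# this file's location in transformers; nothing heavy loads until attribute access):
#
#   import transformers.models.deit as deit   # cheap: submodules not imported yet
#   model_cls = deit.DeiTModel                # now modeling_deit is actually imported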
'''simple docstring'''
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
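# Illustrative (interactive) use of the helpers above; the prompt strings and
# option lists are examples, not the exact ones accelerate ships:
#
#   compute_env = _ask_options(
#       "In which compute environment are you running?",
#       ["This machine", "AWS (Amazon SageMaker)"],
#       _convert_compute_environment,
#   )
#   num_processes = _ask_field(
#       "How many processes? [1]: ", int, default=1, error_message="Please enter an integer."
#   )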
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
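# The special-token layout asserted above, on concrete bert-base-uncased ids
# ([CLS] = 101, [SEP] = 102, "hello" = 7592, "world" = 2088):
#
#   tok = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
#   tok.build_inputs_with_special_tokens([7592])          # -> [101, 7592, 102]
#   tok.build_inputs_with_special_tokens([7592], [2088])  # -> [101, 7592, 102, 2088, 102]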
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
lowercase__ = {'''input_ids''': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
lowercase__ = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase, model_name='''moussaKam/mbarthez''', revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''', sequences=lowerCamelCase, )
| 710 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]
    def __init__(
        self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024,
        padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000,
        top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value,
            return_attention_mask=return_attention_mask, **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min,
            max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min,
            max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size,
            hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB",
        )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length+hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer
    def __call__(
        self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float32) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
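# Minimal usage sketch for the extractor above ("laion/clap-htsat-unfused" is one
# public checkpoint shipping this feature extractor; input must be mono audio):
#
#   import numpy as np
#   from transformers import ClapFeatureExtractor
#
#   fe = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
#   wave = np.random.randn(48_000).astype(np.float32)  # 1 s of fake 48 kHz audio
#   inputs = fe(wave, sampling_rate=48_000, return_tensors="pt")
#   print(inputs.input_features.shape, inputs.is_longer.shape)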
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False,
    proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
__a : List[Any] = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__a : List[Any] = config_dict.pop('feature_extractor_type' , __lowerCamelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
__a : Optional[int] = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
__a : List[str] = config_dict['auto_map']['AutoFeatureExtractor']
__a : Any = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
__a : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
# It could be in `config.image_processor_type``
__a : str = getattr(__lowerCamelCase , 'image_processor_type' , __lowerCamelCase )
if hasattr(__lowerCamelCase , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
__a : List[Any] = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
__a : int = image_processor_class_from_name(__lowerCamelCase )
__a : Any = image_processor_auto_map is not None
__a : Dict = image_processor_class is not None or type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING
__a : int = resolve_trust_remote_code(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if has_remote_code and trust_remote_code:
__a : Tuple = get_class_from_dynamic_module(
__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
__a : Any = kwargs.pop('code_revision' , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING:
__a : Any = IMAGE_PROCESSOR_MAPPING[type(__lowerCamelCase )]
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
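# Typical entry points for the class above (the checkpoint name is just an example):
#
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = processor(images=image, return_tensors="pt")
#
# Custom pairs can be made discoverable the same way via the static method:
#
#   AutoImageProcessor.register(MyConfig, MyImageProcessor)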
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size
if __name__ == "__main__":
print(F"{solution() = }")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : List[str] = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }
def __init__( self : Dict , __magic_name__ : Dict=5_0265 , __magic_name__ : Any=1024 , __magic_name__ : List[Any]=12 , __magic_name__ : str=16 , __magic_name__ : Optional[int]=4096 , __magic_name__ : int="gelu" , __magic_name__ : str=512 , __magic_name__ : List[Any]=0.1 , __magic_name__ : List[str]=0.0 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : Optional[int]=2 , __magic_name__ : Any=0.02 , __magic_name__ : Tuple=0.0 , __magic_name__ : Dict=True , __magic_name__ : List[Any]=False , __magic_name__ : Dict=True , __magic_name__ : int=True , __magic_name__ : List[Any]=1 , __magic_name__ : Dict=0 , __magic_name__ : Optional[int]=2 , **__magic_name__ : Optional[int] , ) -> List[Any]:
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : str = d_model
lowerCamelCase_ : int = decoder_layers
lowerCamelCase_ : Optional[int] = decoder_attention_heads
lowerCamelCase_ : int = decoder_ffn_dim
lowerCamelCase_ : List[Any] = activation_function
lowerCamelCase_ : Union[str, Any] = max_position_embeddings
lowerCamelCase_ : Union[str, Any] = dropout
lowerCamelCase_ : Optional[int] = attention_dropout
lowerCamelCase_ : Any = activation_dropout
lowerCamelCase_ : Union[str, Any] = init_std
lowerCamelCase_ : Union[str, Any] = decoder_layerdrop
lowerCamelCase_ : List[str] = use_cache
lowerCamelCase_ : Tuple = scale_embedding
lowerCamelCase_ : Tuple = use_learned_position_embeddings
lowerCamelCase_ : Optional[int] = layernorm_embedding
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , )
| 253 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None,
                 activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1,
                 layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True,
                 bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True,
                 scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
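
# Minimal usage sketch (added for illustration; not part of the original file):
# the attribute_map above lets the GPT-2 style names be read under their
# standard transformers aliases.
#
#   config = GPTBigCodeConfig(n_embd=2048, n_head=16)
#   assert config.hidden_size == 2048  # "hidden_size" resolves to "n_embd"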
| 253 | 1 |
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
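
# Tiny numeric check of the two functions above (added for illustration):
# pooling a 4x4 matrix with a 2x2 window and stride 2.
#
#   >>> m = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
#   >>> maxpooling(m, size=2, stride=2)
#   array([[ 6.,  8.],
#          [14., 16.]])
#   >>> avgpooling(m, size=2, stride=2)  # np.average is truncated by int()
#   array([[ 3.,  5.],
#          [11., 13.]])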
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="""avgpooling""", verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 372 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq checkpoint weights into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCAmelCase : int = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
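# Example invocation (added for illustration; the paths below are placeholders,
# not from the original script):
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --not_finetuned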
| 372 | 1 |
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
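
# Numeric illustration of the L0 "stretch and clamp" above (added; not part of
# the original script): scores far below zero clamp to a 0-mask (pruned),
# scores far above zero clamp to a 1-mask (kept).
#
#   scores = torch.tensor([-10.0, 0.0, 10.0])
#   s_bar = torch.sigmoid(scores) * (1.1 - -0.1) + -0.1
#   s_bar.clamp(min=0.0, max=1.0)  # -> tensor([0.0000, 0.5000, 1.0000])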
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
| 355 |
'''simple docstring'''
def factorial(num: int) -> int:
    """Compute num! iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Sum the decimal digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Project Euler 20: return the sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
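    # Known small case, added as a sanity check: 10! = 3628800 and
    # 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
    assert solution(10) == 27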
| 355 | 1 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None
def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """A context-manager proxy returned by ``BaseFileLock.acquire``."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquires the file lock or fails with a :exc:`Timeout` error."""
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Releases the file lock."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 93 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 | 1 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the substring divisibility property of Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
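    # Sanity check from the problem statement (added for illustration):
    # 1406357289 has the substring divisibility property.
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))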
| 274 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 274 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid
    mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, represented as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
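

# Quick illustration of the two helpers above (added; not in the original file):
#   get_pairs(("h", "e", "l", "l", "o"))
#       -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
#   bytes_to_unicode() maps every byte 0-255 to a printable unicode character,
#   e.g. the space byte maps to "Ġ", which is why byte-level BPE merge files
#   show "Ġ" where a leading space was.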
class BartTokenizer(PreTrainedTokenizer):
    """Constructs a BART tokenizer, which uses byte-level Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>",
                 sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
                 add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 687 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample is retrieved by indexing the list of token_ids and their corresponding lengths.
    """

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
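
# Minimal usage sketch (added for illustration; the `params` fields below are
# inferred from how they are read above, not from the original training code):
#
#   from types import SimpleNamespace
#   params = SimpleNamespace(
#       mlm=False, is_master=True, max_model_input_size=512,
#       special_tok_ids={"bos_token": 0, "eos_token": 2, "unk_token": 3, "pad_token": 1},
#   )
#   data = [np.array([0, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 2])]  # bos ... eos, > 11 tokens
#   dataset = LmSeqsDataset(params, data)
#   batch = dataset.batch_sequences([dataset[0]])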
| 408 | 0 |
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 708 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(self, vocab_size=250880, hidden_size=2560, num_hidden_layers=36, num_attention_heads=32,
                 intermediate_size=10240, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1,
                 initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 437 | 0 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCAmelCase :List[Any] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_lowerCAmelCase :Optional[Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_lowerCAmelCase :str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None,
                 num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5,
                 kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024,
                 divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features,
            p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed,
        )
        return out
| 251 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum full adder; an input of 2 puts that qubit
    into superposition via a Hadamard gate."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
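
# A classical cross-check (illustrative addition, not part of the original
# file): for definite inputs (0 or 1), the dominant measured state of the
# circuit above should match the (sum, carry_out) bits computed classically.
def classical_full_adder(input_1: int, input_2: int, carry_in: int) -> tuple:
    """Return (sum_bit, carry_out) of a classical full adder."""
    total = input_1 + input_2 + carry_in
    return total % 2, total // 2
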
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 292 | 0 |
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encodes `data` according to RFC4648."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decodes `encoded_data` according to RFC4648."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
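    # A small self-check against the standard library (illustrative addition,
    # not part of the original module): encode/decode should round-trip and
    # agree with `base64.b64encode` / `base64.b64decode`.
    import base64 as stdlib_base64

    sample = b"Hello, World!"
    assert base64_encode(sample) == stdlib_base64.b64encode(sample)
    assert base64_decode(base64_encode(sample)) == sample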
| 713 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns the list of utf-8 bytes and a mapping to unicode strings, avoiding
    whitespace/control characters the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
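
# Illustration (added for this edit, not in the original file): `get_pairs`
# yields the adjacent symbol pairs that the BPE loop in `LEDTokenizer.bpe`
# ranks and merges, e.g. get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}.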
class LEDTokenizer(PreTrainedTokenizer):
    """Constructs a LED tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
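
# A minimal usage sketch (illustrative, assuming the hub checkpoint below or
# local `vocab.json` / `merges.txt` files are available):
#
#   tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#   batch = tokenizer(["A long document ..."], padding="max_length", max_length=16)
#   batch["global_attention_mask"] = [[1] + [0] * 15]
#   batch = tokenizer.pad(batch)  # routes through `_pad` above, padding the
#                                 # global attention mask with -1 as documented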
| 499 | 0 |
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg_error = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg_error)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 465 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    """
    Mark the function with several key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the handler for the pressed character, if one exists."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the `KeyHandler` metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
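
# A minimal usage sketch (illustrative, not part of the original module):
#
#   @register
#   class Menu:
#       @mark("j")
#       def move_down(cls):
#           ...
#
#       @mark_multiple("k", "K")
#       def move_up(cls):
#           ...
#
#   Menu.handle_input(Menu)  # dispatches the pressed key to the marked method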
| 465 | 1 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        inputs["prompt_embeds"] = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
| 187 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list) -> int:
    """
    Recursive (brute force) solution: try every cell as the top-left corner of
    a square and track the best side length found.
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list) -> int:
    """
    Same recursion with memoization: each sub-problem is solved at most once
    thanks to `dp_array`.
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list) -> int:
    """
    Iterative bottom-up dynamic programming in O(rows * cols) time.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list) -> int:
    """
    Same bottom-up recurrence, but keeping only two rows of the DP table, so
    the extra space is O(cols) instead of O(rows * cols).
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy (not alias) so the next pass still reads this row's finished values
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
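    # Illustrative cross-check (added for this edit): all four variants should
    # agree on the same input; the bottom-up versions run in O(rows * cols).
    sample = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert (
        largest_square_area_in_matrix_top_down(3, 3, sample)
        == largest_square_area_in_matrix_top_down_with_dp(3, 3, sample)
        == largest_square_area_in_matrix_bottom_up(3, 3, sample)
        == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, sample)
        == 2
    )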
| 187 | 1 |
def max_product_subarray(numbers: list) -> int:
    """Return the maximum product of any contiguous subarray of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
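

# A brief usage sketch (the example values below are illustrative, not from
# the original file):
if __name__ == "__main__":
    print(max_product_subarray([2, 3, -2, 4]))  # 6  -> subarray [2, 3]
    print(max_product_subarray([-2, 0, -1]))  # 0
    print(max_product_subarray([-2, -3, 4]))  # 24 -> subarray [-2, -3, 4]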
| 27 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for ForPreTraining model, which needs an extra `next_sentence_label` input
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 211 | 0 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)

models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test name
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=False,
            fp16=False,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=True,
            fp16=False,
        )

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=False,
            fp16=True,
        )

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage,
            model=model,
            distributed=True,
            fp16=True,
        )

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A , _A , _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = requests.get(_A , headers=_A , allow_redirects=_A )
_UpperCAmelCase = result.headers["""Location"""]
_UpperCAmelCase = requests.get(_A , allow_redirects=_A )
_UpperCAmelCase = os.path.join(_A , F"""{artifact_name}.zip""" )
with open(_A , """wb""" ) as fp:
fp.write(response.content )
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = None
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_A ) as f:
for line in f:
_UpperCAmelCase = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_UpperCAmelCase = line[: line.index(""": """ )]
_UpperCAmelCase = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_UpperCAmelCase = line[len("""FAILED """ ) :]
failed_tests.append(_A )
elif filename == "job_name.txt":
_UpperCAmelCase = line
if len(_A ) != len(_A ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(_A )} for `errors` """
F"""and {len(_A )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
_UpperCAmelCase = None
if job_name and job_links:
_UpperCAmelCase = job_links.get(_A , _A )
# A list with elements of the form (line of error, error, failed test)
_UpperCAmelCase = [x + [y] + [job_link] for x, y in zip(_A , _A )]
return result
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = [os.path.join(_A , _A ) for p in os.listdir(_A ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_A , job_links=_A ) )
return errors
def _UpperCamelCase ( _A , _A=None ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Counter()
counter.update([x[1] for x in logs] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_UpperCAmelCase = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) )
return r
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_UpperCAmelCase = test.split("""/""" )[2]
else:
_UpperCAmelCase = None
return test
def reduce_by_model(logs, error_filter=None):
    """Count the errors per model and keep the error breakdown for each model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error statistics as a GitHub-flavored markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)
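
# The rendered markdown looks roughly like this (hypothetical counts):
# | no. | error | status |
# |-:|:-|:-|
# | 3 | ValueError: not enough values to unpack | |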
def make_github_table_per_model(reduced_by_model):
    """Render the per-model statistics as a GitHub-flavored markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    sa = make_github_table(reduced_by_error)
    sb = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(sa)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(sb)
| 19 | 1 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
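
# A minimal sanity check under the same assumptions (x**3 - 2x - 5 has a root near 2.0946,
# and the loop stops once the bracket width drops below 1e-7):
def _demo_bisection() -> None:
    root = bisection(f, 1, 1000)
    assert abs(f(root)) < 1e-5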
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
| 622 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
        # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
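
# A small usage sketch: the classic example admits a longest non-decreasing
# subsequence of length 6 (e.g. [10, 22, 33, 41, 60, 80]).
def _demo_longest_subsequence() -> None:
    result = longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    assert len(result) == 6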
if __name__ == "__main__":
import doctest
doctest.testmod()
| 440 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 708 |
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
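
# Hypothetical invocation (the script name and values are illustrative):
# python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5 --batch_size 6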
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
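
# A tiny shape sketch for the metric function (illustrative values only):
def _demo_compute_metrics():
    logits = np.array([[0.1, 0.9], [0.8, 0.2]])
    references = np.array([1, 0])
    assert compute_metrics((logits, references))["accuracy"] == 1.0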
class CustomCallback(TrainerCallback):
    def __init__(self, trainer):
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
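
# Note: the callback evaluates on the *training* split after each epoch; `control` is copied
# first because the nested `evaluate()` call mutates the TrainerControl state, and the
# pristine copy is returned so the outer training loop's flags stay intact.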
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
| 518 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
snake_case__ :Union[str, Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
snake_case__ :Dict = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,UpperCamelCase )
self.assertListEqual(encoding.boxes ,UpperCamelCase )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 241 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of `nums`."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
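
# A small usage sketch: from [1, 5, 3, 7, 2, 2, 6] the best non-adjacent picks are 5 + 7 + 6 = 18.
def _demo_maximum_non_adjacent_sum() -> None:
    assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18
    assert maximum_non_adjacent_sum([]) == 0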
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 241 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
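
    # Pegasus reserves the first ids for pad/eos/mask tokens, so a raw SentencePiece piece id `s`
    # maps to vocab id `s + tokenizer.offset` (103 here); e.g. sp id 2 becomes 105, the <unk> id above.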
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'input_ids': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 700 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return the list of `diffusers` versions published on PyPI, sorted ascending."""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda v: version.Version(v))
def init_hf_modules():
    """Create the cache directory for dynamic modules and add it to `sys.path`."""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """Create a dynamic module under the modules cache, adding `__init__.py` files all the way down."""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Return the modules imported relatively (`import .xxx` / `from .xxx import yyy`) in `module_file`."""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
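
# Illustrative behavior (hypothetical file content): a module containing
# "from .unet import UNet" and "import .scheduler" yields ["unet", "scheduler"], in any order.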
def get_relative_import_files(module_file):
    """Recursively collect the files of all relative imports reachable from `module_file`."""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """Check that all top-level imports in `filename` resolve in the current environment."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
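
# Illustrative behavior: for a downloaded pipeline file containing "import torch" and
# "from numpy import array", this verifies `torch` and `numpy` are importable before the
# module is executed, then returns the file's relative imports so they can be cached too.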
def get_class_in_module(class_name, module_path):
    """Import a module placed in the dynamic modules cache and return `class_name` from it."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Retrieve the (unique) pipeline class inheriting from `DiffusionPipeline` in `loaded_module`."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Download a module file from a local folder, the community pipelines on GitHub, or the Hub, and cache it."""
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision != "main":
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Extract a class from a module file, present in the local folder or in a repository of a model."""
    # And lastly we get the class inside our newly created module
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
| 685 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps an image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self, images: Optional[ImageInput] = None, text=None, add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping, return_length=return_length,
                verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 189 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Base class for text model outputs that also contains a projection of the last hidden states."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(
        self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
        inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None,
        output_attentions=None, output_hidden_states=None, return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states, attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states, attentions=outputs.attentions,
            )
| 189 | 1 |
'''simple docstring'''
def longest_common_subsequence(x: str, y: str):
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq
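
# A quick usage sketch: "gaming" is a longest common subsequence of "programming" and "gaming".
def _demo_longest_common_subsequence() -> None:
    length, _ = longest_common_subsequence("programming", "gaming")
    assert length == 6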
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
| 719 |
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, xa: int) -> list:
    """Interpolate and evaluate a polynomial at `xa` with Neville's iterated interpolation."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
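
# A small check on linear data y = x + 5, for which polynomial interpolation is exact:
def _demo_neville_interpolate() -> None:
    value = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
    assert abs(value - 10.0) < 1e-9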
if __name__ == "__main__":
import doctest
doctest.testmod()
| 162 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AudioDiffusionPipelineTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNetaDModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2,
            block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model
    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2,
            block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10,
        )
        return model
@property
def _lowerCamelCase ( self : Tuple ):
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , )
__UpperCamelCase = UNetaDModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , )
return vqvae, unet
@slow
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__UpperCamelCase = DDPMScheduler()
__UpperCamelCase = AudioDiffusionPipeline(vqvae=__A , unet=self.dummy_unet , mel=__A , scheduler=__A )
__UpperCamelCase = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__UpperCamelCase = torch.Generator(device=__A ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=__A , steps=4 )
__UpperCamelCase = output.audios[0]
__UpperCamelCase = output.images[0]
__UpperCamelCase = torch.Generator(device=__A ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=__A , steps=4 , return_dict=__A )
__UpperCamelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__UpperCamelCase = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:1_0]
__UpperCamelCase = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__UpperCamelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__UpperCamelCase = DDIMScheduler()
__UpperCamelCase = self.dummy_vqvae_and_unet
__UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__A , scheduler=__A )
__UpperCamelCase = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
np.random.seed(0 )
__UpperCamelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__UpperCamelCase = torch.Generator(device=__A ).manual_seed(4_2 )
__UpperCamelCase = pipe(raw_audio=__A , generator=__A , start_step=5 , steps=1_0 )
__UpperCamelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__UpperCamelCase = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__UpperCamelCase = self.dummy_unet_condition
__UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=__A , mel=__A , scheduler=__A )
__UpperCamelCase = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
np.random.seed(0 )
__UpperCamelCase = torch.rand((1, 1, 1_0) )
__UpperCamelCase = pipe(generator=__A , encoding=__A )
__UpperCamelCase = output.images[0]
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__UpperCamelCase = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = torch_device
__UpperCamelCase = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
__UpperCamelCase = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__UpperCamelCase = torch.Generator(device=__A ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=__A )
__UpperCamelCase = output.audios[0]
__UpperCamelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0]
__UpperCamelCase = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 399 |
'''simple docstring'''
import math
def lowercase__ ( __lowercase : int ) -> int:
"""simple docstring"""
if not isinstance(__lowercase , __lowercase ):
__UpperCamelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(__lowercase )
if number < 1:
__UpperCamelCase = F'''Input value of [number={number}] must be > 0'''
raise ValueError(__lowercase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
__UpperCamelCase = int(math.log(number // 3 , 2 ) ) + 2
__UpperCamelCase = [3, 5]
__UpperCamelCase = 2
__UpperCamelCase = 3
for block in range(1 , __lowercase ):
for _ in range(__lowercase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
a__ : int =0
try:
a__ : str =proth(number)
except ValueError:
print(f'ValueError: there is no {number}th Proth number')
continue
print(f'The {number}th Proth number: {value}')
| 399 | 1 |
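A direct, unoptimized enumeration of Proth numbers k·2^m + 1 (odd k < 2^m) makes a handy cross-check for the block-based construction above; the helper name is illustrative.

```python
def proth_numbers(limit: int) -> list[int]:
    out = set()
    m = 1
    while (1 << m) + 1 <= limit:
        for k in range(1, 1 << m, 2):  # odd k strictly below 2**m
            value = k * (1 << m) + 1
            if value <= limit:
                out.add(value)
        m += 1
    return sorted(out)

assert proth_numbers(60) == [3, 5, 9, 13, 17, 25, 33, 41, 49, 57]
```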
import pytest
a_ :str = '__dummy_dataset1__'
a_ :str = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def a ( ) -> Tuple:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def a ( ) -> str:
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def a ( A__ , A__ , A__ ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = dataset_loading_script_name
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tmp_path / 'datasets' / script_name
script_dir.mkdir(parents=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ : Any = script_dir / f"""{script_name}.py"""
with open(_lowerCamelCase , '''w''' ) as f:
f.write(_lowerCamelCase )
return str(_lowerCamelCase )
| 716 |
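A sketch of how such a fixture is typically consumed; the test body below is an assumption for illustration, not taken from the row (it relies on `datasets.load_dataset` accepting a path to a script directory).

```python
import datasets

def test_dummy_dataset(dataset_loading_script_dir):
    ds = datasets.load_dataset(dataset_loading_script_dir, split="train")
    assert "tokens" in ds.column_names
```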
def a ( A__ , A__ , A__ ) -> float:
'''simple docstring'''
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(A__ , A__ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
SCREAMING_SNAKE_CASE__ : List[Any] = rate_per_annum / 1_2
    # Years to repay is multiplied by 12 to get the number of payments, since payments are made monthly
SCREAMING_SNAKE_CASE__ : Union[str, Any] = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250 | 0 |
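Worked example of the formula above (figures approximate): borrowing 25,000 at 8% per year over 10 years.

```python
principal, rate_per_annum, years = 25_000.0, 0.08, 10
r = rate_per_annum / 12                       # monthly rate
n = years * 12                                # number of monthly payments
emi = principal * r * (1 + r) ** n / ((1 + r) ** n - 1)
print(f"{emi:.2f}")                           # ≈ 303.32
```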
import re
def lowerCamelCase_ ( UpperCamelCase_ ):
_a : Dict = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(UpperCamelCase_ , UpperCamelCase_ ) )
if __name__ == "__main__":
__UpperCAmelCase : List[Any] = '0094702343221'
print(is_sri_lankan_phone_number(phone))
| 471 |
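The pattern accepts an optional 0 / 94 / +94 / 0094 prefix, a literal 7, one digit from {0, 1, 2, 4, 5, 6, 7, 8}, an optional separator, then seven digits. A standalone illustration:

```python
import re

pattern = re.compile(r"^(?:0|94|\+94|0{2}94)7(0|1|2|4|5|6|7|8)(-| |)\d{7}$")
for number in ("0094702343221", "+94771234567", "0731234567"):
    print(number, bool(pattern.search(number)))
# True, True, False — '3' is not an accepted digit after the leading 7.
```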
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__UpperCAmelCase : List[str] = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
__UpperCAmelCase : List[str] = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
__UpperCAmelCase : Union[str, Any] = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
return float((preds == labels).mean() )
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
_a : int = simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )
_a : List[Any] = float(fa_score(y_true=UpperCamelCase_ , y_pred=UpperCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
_a : str = float(pearsonr(UpperCamelCase_ , UpperCamelCase_ )[0] )
_a : str = float(spearmanr(UpperCamelCase_ , UpperCamelCase_ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def snake_case_ ( self : Dict ) -> Union[str, Any]:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def snake_case_ ( self : Optional[int] , __snake_case : Any , __snake_case : Any ) -> Union[str, Any]:
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__snake_case , __snake_case )}
elif self.config_name == "stsb":
return pearson_and_spearman(__snake_case , __snake_case )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__snake_case , __snake_case )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__snake_case , __snake_case )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 471 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class A__ ( A__ ):
"""simple docstring"""
_lowercase = 42
@flax_register_to_config
class A__ ( nn.Module , A__ , A__ ):
"""simple docstring"""
_lowercase = 3_2
_lowercase = 4
_lowercase = 4
_lowercase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_lowercase = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
_lowercase = False
_lowercase = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
_lowercase = 2
_lowercase = 8
_lowercase = None
_lowercase = 1_2_8_0
_lowercase = 0.0
_lowercase = False
_lowercase = jnp.floataa
_lowercase = True
_lowercase = 0
_lowercase = False
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : jax.random.KeyArray ):
# init input tensors
a__ : int = (1, self.in_channels, self.sample_size, self.sample_size)
a__ : Optional[int] = jnp.zeros(lowerCamelCase__ , dtype=jnp.floataa )
a__ : List[Any] = jnp.ones((1,) , dtype=jnp.intaa )
a__ : str = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
a__, a__ : str = jax.random.split(lowerCamelCase__ )
a__ : Any = {"params": params_rng, "dropout": dropout_rng}
return self.init(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )["params"]
def _UpperCamelCase( self : List[Any] ):
a__ : Tuple = self.block_out_channels
a__ : Tuple = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
a__ : List[Any] = self.num_attention_heads or self.attention_head_dim
# input
a__ : Optional[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
a__ : Union[str, Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
a__ : Optional[Any] = FlaxTimestepEmbedding(lowerCamelCase__ , dtype=self.dtype )
a__ : Optional[Any] = self.only_cross_attention
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
a__ : List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
a__ : Tuple = (num_attention_heads,) * len(self.down_block_types )
# down
a__ : int = []
a__ : Any = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
a__ : str = output_channel
a__ : Optional[Any] = block_out_channels[i]
a__ : int = i == len(lowerCamelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
a__ : int = FlaxCrossAttnDownBlockaD(
in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
a__ : Optional[int] = FlaxDownBlockaD(
in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowerCamelCase__ )
a__ : Optional[int] = down_blocks
# mid
a__ : Optional[int] = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
a__ : Optional[int] = []
a__ : List[str] = list(reversed(lowerCamelCase__ ) )
a__ : Union[str, Any] = list(reversed(lowerCamelCase__ ) )
a__ : Any = list(reversed(lowerCamelCase__ ) )
a__ : List[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
a__ : Any = output_channel
a__ : Tuple = reversed_block_out_channels[i]
a__ : Tuple = reversed_block_out_channels[min(i + 1 , len(lowerCamelCase__ ) - 1 )]
a__ : Optional[int] = i == len(lowerCamelCase__ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
a__ : int = FlaxCrossAttnUpBlockaD(
in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , prev_output_channel=lowerCamelCase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
a__ : Optional[int] = FlaxUpBlockaD(
in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , prev_output_channel=lowerCamelCase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowerCamelCase__ )
a__ : List[Any] = output_channel
a__ : int = up_blocks
# out
a__ : Tuple = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a__ : Tuple = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : int=None , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : bool = True , lowerCamelCase__ : bool = False , ):
# 1. time
if not isinstance(lowerCamelCase__ , jnp.ndarray ):
a__ : Optional[int] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowerCamelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
a__ : Optional[Any] = timesteps.astype(dtype=jnp.floataa )
a__ : Optional[int] = jnp.expand_dims(lowerCamelCase__ , 0 )
a__ : List[Any] = self.time_proj(lowerCamelCase__ )
a__ : List[Any] = self.time_embedding(lowerCamelCase__ )
# 2. pre-process
a__ : Optional[Any] = jnp.transpose(lowerCamelCase__ , (0, 2, 3, 1) )
a__ : int = self.conv_in(lowerCamelCase__ )
# 3. down
a__ : List[Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
a__, a__ : Optional[int] = down_block(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , deterministic=not train )
else:
a__, a__ : Optional[Any] = down_block(lowerCamelCase__ , lowerCamelCase__ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
a__ : Union[str, Any] = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowerCamelCase__ , lowerCamelCase__ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
a__ : Any = new_down_block_res_samples
# 4. mid
a__ : Optional[int] = self.mid_block(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
a__ : str = down_block_res_samples[-(self.layers_per_block + 1) :]
a__ : int = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
a__ : Any = up_block(
lowerCamelCase__ , temb=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , res_hidden_states_tuple=lowerCamelCase__ , deterministic=not train , )
else:
a__ : Optional[Any] = up_block(lowerCamelCase__ , temb=lowerCamelCase__ , res_hidden_states_tuple=lowerCamelCase__ , deterministic=not train )
# 6. post-process
a__ : Optional[Any] = self.conv_norm_out(lowerCamelCase__ )
a__ : List[str] = nn.silu(lowerCamelCase__ )
a__ : Tuple = self.conv_out(lowerCamelCase__ )
a__ : Tuple = jnp.transpose(lowerCamelCase__ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowerCamelCase__ )
| 151 |
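The model above works channels-last internally: the channels-first input (B, C, H, W) is transposed to (B, H, W, C) before `conv_in` and transposed back after `conv_out`. In miniature:

```python
import jax.numpy as jnp

sample = jnp.zeros((1, 4, 32, 32))            # (B, C, H, W) as passed by callers
nhwc = jnp.transpose(sample, (0, 2, 3, 1))    # (B, H, W, C) for Flax convolutions
restored = jnp.transpose(nhwc, (0, 3, 1, 2))  # back to (B, C, H, W) on the way out
print(nhwc.shape, restored.shape)             # (1, 32, 32, 4) (1, 4, 32, 32)
```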
def UpperCamelCase_ ( __a ) -> bool:
if num < 0:
return False
a__ : int = num
a__ : int = 0
while num > 0:
a__ : str = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151 | 1 |
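A quick cross-check of the arithmetic reversal above against string reversal; the helper name is illustrative.

```python
def is_palindrome_number(num: int) -> bool:
    # Negative numbers are rejected, mirroring the num < 0 guard above.
    return num >= 0 and str(num) == str(num)[::-1]

for n in (121, 123, -121, 0):
    print(n, is_palindrome_number(n))  # True, False, False, True
```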
'''simple docstring'''
from math import factorial
def lowerCAmelCase__ ( lowerCamelCase : int ,lowerCamelCase : int ,lowerCamelCase : float ):
if successes > trials:
raise ValueError('successes must be lower or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(lowerCamelCase ,lowerCamelCase ) or not isinstance(lowerCamelCase ,lowerCamelCase ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
raise ValueError('prob has to be in range of 1 - 0' )
_A : str = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_A : Any = float(factorial(lowerCamelCase ) )
coefficient /= factorial(lowerCamelCase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 128 |
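Sanity check of the printed example against the closed form C(n, k) · p^k · (1 − p)^(n−k):

```python
from math import comb

n, k, p = 4, 2, 0.75
expected = comb(n, k) * p**k * (1 - p) ** (n - k)
print(expected)  # 0.2109375, matching binomial_distribution(2, 4, 0.75)
```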
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
A : Tuple = '''▁'''
A : Dict = {'''vocab_file''': '''sentencepiece.bpe.model'''}
A : str = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
A : Any = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1024,
}
# fmt: off
A : Dict = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class __lowerCamelCase ( a_ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = PRETRAINED_VOCAB_FILES_MAP
a = ["input_ids", "attention_mask"]
a = []
a = []
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Union[str, Any]="</s>" , SCREAMING_SNAKE_CASE : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE : Dict="<s>" , SCREAMING_SNAKE_CASE : int="<unk>" , SCREAMING_SNAKE_CASE : Tuple="<pad>" , SCREAMING_SNAKE_CASE : List[str]="<mask>" , SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE : str , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
_A : Tuple = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) else mask_token
_A : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
_A : Optional[int] = kwargs.get('additional_special_tokens' , [])
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=SCREAMING_SNAKE_CASE , tgt_lang=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_A : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(SCREAMING_SNAKE_CASE))
_A : Tuple = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_A : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_A : List[Any] = 1
_A : List[Any] = len(self.sp_model)
_A : Union[str, Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(SCREAMING_SNAKE_CASE)
}
_A : Dict = {v: k for k, v in self.lang_code_to_id.items()}
_A : List[str] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
_A : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_A : str = src_lang if src_lang is not None else 'en_XX'
_A : Any = self.lang_code_to_id[self._src_lang]
_A : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def A ( self : Optional[Any]):
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def A ( self : Union[str, Any]):
return self._src_lang
@src_lang.setter
def A ( self : List[str] , SCREAMING_SNAKE_CASE : str):
_A : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self : Dict):
_A : int = self.__dict__.copy()
_A : Optional[Any] = None
return state
def __setstate__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict):
_A : Tuple = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_A : Any = {}
_A : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def A ( self : Any):
_A : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def A ( self : int , SCREAMING_SNAKE_CASE : str):
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE)
def A ( self : List[str] , SCREAMING_SNAKE_CASE : str):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_A : str = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def A ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any]):
_A : Tuple = []
_A : Any = ''
_A : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE) + token
_A : Any = True
_A : Dict = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE)
_A : Tuple = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE)
return out_string.strip()
def A ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None):
if not os.path.isdir(SCREAMING_SNAKE_CASE):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
_A : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE)
elif not os.path.isfile(self.vocab_file):
with open(SCREAMING_SNAKE_CASE , 'wb') as fi:
_A : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE)
return (out_vocab_file,)
def A ( self : int , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE)
_A : str = [1] * len(self.prefix_tokens)
_A : Optional[Any] = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE)) + suffix_ones
return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE)) + ([0] * len(SCREAMING_SNAKE_CASE)) + suffix_ones
def A ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] , SCREAMING_SNAKE_CASE : Optional[str] , **SCREAMING_SNAKE_CASE : str):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
_A : Optional[Any] = src_lang
_A : Optional[int] = self(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
_A : List[str] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE)
_A : List[str] = tgt_lang_id
return inputs
def A ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str = "en_XX" , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , SCREAMING_SNAKE_CASE : str = "ro_RO" , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
_A : Any = src_lang
_A : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
def A ( self : List[Any]):
return self.set_src_lang_special_tokens(self.src_lang)
def A ( self : Any):
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def A ( self : Tuple , SCREAMING_SNAKE_CASE : str):
_A : Optional[int] = self.lang_code_to_id[src_lang]
_A : Dict = [self.cur_lang_code_id]
_A : List[str] = [self.eos_token_id]
def A ( self : int , SCREAMING_SNAKE_CASE : str):
_A : str = self.lang_code_to_id[tgt_lang]
_A : int = [self.cur_lang_code_id]
_A : str = [self.eos_token_id]
| 128 | 1 |
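The id layout described in the alignment comments above, in miniature: raw SentencePiece ids are shifted by a fixed offset of 1 so that positions 0–3 can hold the fairseq-style specials.

```python
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1
spm_id = 3  # e.g. "," sits at id 3 in the spm vocab
print(spm_id + fairseq_offset)  # 4 — its id in the fairseq-aligned vocab
```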
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=False ) -> List[Any]:
try:
SCREAMING_SNAKE_CASE__ : Dict = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
SCREAMING_SNAKE_CASE__ : List[Any] = default
else:
# KEY is set, convert it to True or False.
try:
SCREAMING_SNAKE_CASE__ : Tuple = strtobool(__lowerCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'''If set, {key} must be yes or no.''' )
return _value
a :List[str] = parse_flag_from_env("RUN_SLOW", default=False)
def _lowercase ( __lowerCAmelCase ) -> Any:
return unittest.skip("""Test was skipped""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> str:
return unittest.skipUnless(_run_slow_tests , """test is slow""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Optional[int]:
return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Optional[int]:
return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> List[Any]:
return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Tuple:
return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Optional[int]:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Any:
return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> int:
return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> List[Any]:
return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> int:
return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> List[Any]:
return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Any:
return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> int:
return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Any:
return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Optional[Any]:
return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase=None , __lowerCAmelCase=None ) -> Dict:
if test_case is None:
return partial(__lowerCAmelCase , version=__lowerCAmelCase )
return unittest.skipUnless(is_torch_version(""">=""" , __lowerCAmelCase ) , F'''test requires torch version >= {version}''' )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> Dict:
return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> List[str]:
return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(__lowerCAmelCase )
def _lowercase ( __lowerCAmelCase ) -> List[Any]:
return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(__lowerCAmelCase )
a :Optional[Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def _lowercase ( __lowerCAmelCase ) -> Union[str, Any]:
return unittest.skipUnless(
_atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(__lowerCAmelCase )
class __a (unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = True
@classmethod
def _a ( cls ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = tempfile.mkdtemp()
@classmethod
def _a ( cls ) -> Optional[int]:
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _a ( self ) -> Dict:
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("""**/*""" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_a )
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> Dict:
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self , _a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = mocks if isinstance(_a , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def _lowercase ( __lowerCAmelCase ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] = AcceleratorState()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tensor[None].clone().to(state.device )
SCREAMING_SNAKE_CASE__ : Optional[Any] = gather(__lowerCAmelCase ).cpu()
SCREAMING_SNAKE_CASE__ : List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __lowerCAmelCase ):
return False
return True
class __a :
'''simple docstring'''
def __init__( self , _a , _a , _a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = returncode
SCREAMING_SNAKE_CASE__ : List[Any] = stdout
SCREAMING_SNAKE_CASE__ : Tuple = stderr
async def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
while True:
SCREAMING_SNAKE_CASE__ : Optional[int] = await stream.readline()
if line:
callback(__lowerCAmelCase )
else:
break
async def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=False ) -> _RunOutput:
if echo:
print("""\nRunning: """ , """ """.join(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Any = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__lowerCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__lowerCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, we will need to switch to the following code. The problem is that
    # no data will be seen until the process finishes, and if it hangs there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
def tee(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="" ):
SCREAMING_SNAKE_CASE__ : int = line.decode("""utf-8""" ).rstrip()
sink.append(__lowerCAmelCase )
if not quiet:
print(__lowerCAmelCase , __lowerCAmelCase , file=__lowerCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __lowerCAmelCase : tee(__lowerCAmelCase , __lowerCAmelCase , sys.stdout , label="""stdout:""" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __lowerCAmelCase : tee(__lowerCAmelCase , __lowerCAmelCase , sys.stderr , label="""stderr:""" ) ) ),
] , timeout=__lowerCAmelCase , )
return _RunOutput(await p.wait() , __lowerCAmelCase , __lowerCAmelCase )
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=180 , __lowerCAmelCase=False , __lowerCAmelCase=True ) -> _RunOutput:
SCREAMING_SNAKE_CASE__ : Optional[int] = asyncio.get_event_loop()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = loop.run_until_complete(
_stream_subprocess(__lowerCAmelCase , env=__lowerCAmelCase , stdin=__lowerCAmelCase , timeout=__lowerCAmelCase , quiet=__lowerCAmelCase , echo=__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Any = """ """.join(__lowerCAmelCase )
if result.returncode > 0:
SCREAMING_SNAKE_CASE__ : int = """\n""".join(result.stderr )
raise RuntimeError(
F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
F'''The combined stderr from workers follows:\n{stderr}''' )
return result
class __a (UpperCamelCase_):
'''simple docstring'''
pass
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase=False ) -> str:
try:
SCREAMING_SNAKE_CASE__ : Any = subprocess.check_output(__lowerCAmelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__lowerCAmelCase , """decode""" ):
SCREAMING_SNAKE_CASE__ : Tuple = output.decode("""utf-8""" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F'''Command `{' '.join(__lowerCAmelCase )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
| 12 |
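The RUN_SLOW gating above follows a common pattern: read a yes/no environment variable and fall back to a default when unset. A minimal standalone version (function name assumed for illustration):

```python
import os
from distutils.util import strtobool

def flag_from_env(key: str, default: bool = False) -> bool:
    value = os.environ.get(key)
    return default if value is None else bool(strtobool(value))

print(flag_from_env("RUN_SLOW"))  # False unless RUN_SLOW is set to yes/true/1
```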
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __a (UpperCamelCase_):
'''simple docstring'''
def _a ( self , _a ) -> Union[str, Any]:
"""simple docstring"""
with open(_a , encoding="""utf-8""" ) as input_file:
SCREAMING_SNAKE_CASE__ : str = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = input_file.read()
SCREAMING_SNAKE_CASE__ : str = regexp.search(_a )
return match
def _a ( self , _a ) -> Optional[Any]:
"""simple docstring"""
with open(_a , encoding="""utf-8""" ) as input_file:
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
SCREAMING_SNAKE_CASE__ : List[Any] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
SCREAMING_SNAKE_CASE__ : Dict = regexp.finditer(_a )
SCREAMING_SNAKE_CASE__ : int = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _a ( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = Path("""./datasets""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_a ) ):
raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path("""./datasets""" )
SCREAMING_SNAKE_CASE__ : List[str] = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(_a ) ):
raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 12 | 1 |
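The first regex above flags `open(...)` calls that specify neither an encoding nor a binary/write mode. Illustration with the same pattern:

```python
import re

rx = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
print(bool(rx.search('    with open(path) as f:')))                    # True: flagged
print(bool(rx.search('    with open(path, encoding="utf-8") as f:')))  # False: ok
```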
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 95 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : str , lowerCAmelCase : int = 16 , lowerCAmelCase : int = 88 , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : int = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : int = 32 , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : str = "geglu" , lowerCAmelCase : Optional[int] = None , ):
super().__init__()
lowerCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=lowerCAmelCase , attention_head_dim=lowerCAmelCase , in_channels=lowerCAmelCase , num_layers=lowerCAmelCase , dropout=lowerCAmelCase , norm_num_groups=lowerCAmelCase , cross_attention_dim=lowerCAmelCase , attention_bias=lowerCAmelCase , sample_size=lowerCAmelCase , num_vector_embeds=lowerCAmelCase , activation_fn=lowerCAmelCase , num_embeds_ada_norm=lowerCAmelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
lowerCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
lowerCAmelCase = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
lowerCAmelCase = [1, 0]
def __lowercase ( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Any=None , lowerCAmelCase : bool = True , ):
lowerCAmelCase = hidden_states
lowerCAmelCase = []
lowerCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
lowerCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
lowerCAmelCase = self.transformer_index_for_condition[i]
lowerCAmelCase = self.transformers[transformer_index](
lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , timestep=lowerCAmelCase , cross_attention_kwargs=lowerCAmelCase , return_dict=lowerCAmelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
lowerCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
lowerCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=lowerCAmelCase )
| 169 | 0 |
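The mixing step above is a convex combination of the two transformers' residual outputs, followed by the skip connection. A small illustration:

```python
import torch

mix_ratio = 0.5
enc0, enc1, inputs = torch.randn(3, 2, 4).unbind(0)  # two residuals + input states
output = (enc0 * mix_ratio + enc1 * (1 - mix_ratio)) + inputs
print(output.shape)  # torch.Size([2, 4])
```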
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
class a ( snake_case__ ):
'''simple docstring'''
def __init__( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> None:
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
| 424 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
UpperCAmelCase_ : List[Any] = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class a ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase : Tuple = """tapas"""
def __init__( self , lowerCamelCase_=3_0_5_2_2 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=1_0_2_4 , lowerCamelCase_=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0] , lowerCamelCase_=0.02 , lowerCamelCase_=1e-12 , lowerCamelCase_=0 , lowerCamelCase_=10.0 , lowerCamelCase_=0 , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=1.0 , lowerCamelCase_=False , lowerCamelCase_=None , lowerCamelCase_=1.0 , lowerCamelCase_=1.0 , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_="ratio" , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=6_4 , lowerCamelCase_=3_2 , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase_ , **lowerCamelCase_ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
_a : Optional[Any] = vocab_size
_a : List[str] = hidden_size
_a : Union[str, Any] = num_hidden_layers
_a : Tuple = num_attention_heads
_a : Tuple = hidden_act
_a : Optional[Any] = intermediate_size
_a : Dict = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : int = max_position_embeddings
_a : str = type_vocab_sizes
_a : Tuple = initializer_range
_a : int = layer_norm_eps
# Fine-tuning task hyperparameters
_a : Any = positive_label_weight
_a : Optional[int] = num_aggregation_labels
_a : Any = aggregation_loss_weight
_a : str = use_answer_as_supervision
_a : Optional[int] = answer_loss_importance
_a : int = use_normalized_answer_loss
_a : Optional[int] = huber_loss_delta
_a : Optional[int] = temperature
_a : Union[str, Any] = aggregation_temperature
_a : List[str] = use_gumbel_for_cells
_a : Optional[Any] = use_gumbel_for_aggregation
_a : str = average_approximation_function
_a : Tuple = cell_selection_preference
_a : Tuple = answer_loss_cutoff
_a : Optional[int] = max_num_rows
_a : List[Any] = max_num_columns
_a : Any = average_logits_per_cell
_a : str = select_one_column
_a : Any = allow_empty_column_selection
_a : Dict = init_cell_selection_weights_to_zero
_a : List[Any] = reset_position_index_per_cell
_a : Union[str, Any] = disable_per_token_loss
# Aggregation hyperparameters
_a : Dict = aggregation_labels
_a : List[Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels , lowerCamelCase_ ):
_a : str = {int(lowerCamelCase_ ): v for k, v in aggregation_labels.items()}
| 424 | 1 |
"""simple docstring"""
from collections.abc import Callable
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ = None ) -> None:
# Stores actual heap items.
a : list = []
# Stores indexes of each item for supporting updates and deletion.
a : dict = {}
# Stores current size of heap.
a : List[Any] = 0
        # Stores the function used to score each item; heap ordering is based on
        # this score. Defaults to the identity function.
        a : str = key or (lambda x : x)
def __a ( self , lowerCAmelCase__ ) -> int | None:
return int((i - 1) / 2 ) if i > 0 else None
def __a ( self , lowerCAmelCase__ ) -> int | None:
a : List[Any] = int(2 * i + 1 )
return left if 0 < left < self.size else None
def __a ( self , lowerCAmelCase__ ) -> int | None:
a : Dict = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
a, a : Optional[int] = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
a, a : Optional[int] = self.arr[j], self.arr[i]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> bool:
return self.arr[i][1] < self.arr[j][1]
def __a ( self , lowerCAmelCase__ ) -> int:
a : Any = self._left(lowerCAmelCase__ )
a : Tuple = self._right(lowerCAmelCase__ )
a : List[Any] = i
if left is not None and not self._cmp(lowerCAmelCase__ , lowerCAmelCase__ ):
a : str = left
if right is not None and not self._cmp(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Any = right
return valid_parent
def __a ( self , lowerCAmelCase__ ) -> None:
a : Union[str, Any] = self._parent(lowerCAmelCase__ )
while parent is not None and not self._cmp(lowerCAmelCase__ , lowerCAmelCase__ ):
self._swap(lowerCAmelCase__ , lowerCAmelCase__ )
a, a : Optional[int] = parent, self._parent(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ ) -> None:
a : Any = self._get_valid_parent(lowerCAmelCase__ )
while valid_parent != index:
self._swap(lowerCAmelCase__ , lowerCAmelCase__ )
a, a : int = valid_parent, self._get_valid_parent(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
if item not in self.pos_map:
return
a : str = self.pos_map[item]
a : Any = [item, self.key(lowerCAmelCase__ )]
        # Make sure the heap property holds in both the up and down directions.
        # Ideally only one of the two calls will make any change.
self._heapify_up(lowerCAmelCase__ )
self._heapify_down(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ ) -> None:
if item not in self.pos_map:
return
a : Any = self.pos_map[item]
del self.pos_map[item]
a : Any = self.arr[self.size - 1]
a : Any = index
self.size -= 1
        # Make sure the heap property holds in both the up and down directions. Ideally only one
        # of them will make any change, so there is no performance loss in calling both.
if self.size > index:
self._heapify_up(lowerCAmelCase__ )
self._heapify_down(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
a : List[str] = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(lowerCAmelCase__ )] )
else:
a : Union[str, Any] = [item, self.key(lowerCAmelCase__ )]
a : Tuple = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __a ( self ) -> tuple | None:
return self.arr[0] if self.size else None
def __a ( self ) -> tuple | None:
a : Union[str, Any] = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def _SCREAMING_SNAKE_CASE ( ) ->None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 633 |
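The class above pairs a binary heap with a `pos_map` so items can be updated or deleted in O(log n). When only insert/extract is needed, key-based ordering is also available with the standard library, as in this sketch:

```python
import heapq

key = lambda x: -x  # negated key gives max-heap behaviour on a min-heap
heap: list[tuple[int, int]] = []
for item in (3, 1, 4, 1, 5):
    heapq.heappush(heap, (key(item), item))
print(heapq.heappop(heap)[1])  # 5 — the highest item comes first under this key
```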
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
a : Optional[int] = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str) -> None:
    """Copy/paste/tweak roberta_prelayernorm's weights to our RoBERTa-PreLayerNorm structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
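
# Illustrative invocation (the script's file name is assumed; the repo id comes
# from the --checkpoint-repo help text below):
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-dump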
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 633 | 1 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
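# Example: with precision = 10, searching a 1_000-element list narrows the
# candidate interval ternarily until it spans fewer than 10 indices, then the
# functions below hand off to lin_search for the final scan.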
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Performs a linear search over array[left:right]; returns -1 if not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; ``array`` must be sorted in ascending order."""
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)

        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over array[left:right] (inclusive bounds)."""
    if left <= right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)

        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
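
# Worked example: on a sorted 13-element list (left=0, right=12) the first probes
# land at one_third = 0 + 12 // 3 == 4 and two_third = 12 - 12 // 3 == 8, cutting
# the interval into three near-equal parts; spans shorter than `precision` are
# handled by lin_search instead.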
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f'''Iterative search: {target} found at position: {result_ite}''')
        print(f'''Recursive search: {target} found at position: {result_rec}''')
    else:
        print("Not found")
| 121 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"""Saving tokenizer to {pytorch_dump_folder_path}""")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"""luke.{key}"""] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"""Unexpected missing_keys: {missing_keys}""")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            f""" {expected_shape}""")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
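
# Illustrative invocation (the script's file name and paths are placeholders):
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke.bin --metadata_path ./metadata.json \
#       --entity_vocab_path ./entity_vocab.jsonl --pytorch_dump_folder_path ./mluke-base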
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"""{language}:{entity_name}"""] = entity_id
    return new_mapping
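
# The parser above expects one JSON object per line, e.g. (illustrative values):
#   {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
# which maps both "en:Japan" and "ja:日本" to entity id 3.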
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 121 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            F"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
            F""" reinstalling {pkg}.""" )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            F"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = F"""\n{hint}""" if hint is not None else ""

    # non-versioned check
    wanted: dict = {}  # initialized up front so a bare "python" requirement cannot hit an undefined name
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                F""" got {requirement}""" )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    F""" but got {requirement}""" )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
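
# Illustrative usage:
#   require_version("tokenizers==0.9.4")     # exact pin
#   require_version("numpy>=1.17,<2.0")      # comma-separated range, both checked
#   require_version("python>=3.8")           # special-cased against sys.version_info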
| 95 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
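# For example, byte 0x20 (space) falls outside the printable ranges kept above; it
# is the 33rd missing byte, so it maps to chr(256 + 32) == "Ġ", which is why GPT-2
# style vocabularies render leading spaces as "Ġ".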
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
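# Example: get_pairs("hello") -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}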
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
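
    # Example: with padding_side == "right", a `global_attention_mask` of [1, 0, 0]
    # padded to length 5 becomes [1, 0, 0, -1, -1]; -1 marks padding because 0
    # already means "local attention" for LED.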
| 428 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 428 | 1 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]
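
# Example: calculate_prime_numbers(10) -> [2, 3, 5, 7]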
def solution(base: int = 800800, degree: int = 800800) -> int:
    """
    Returns the number of hybrid-integers p**q * q**p (with primes p < q) that
    are less than or equal to base**degree.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
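
# Why the log2 bound works: p**q * q**p <= base**degree is equivalent, after taking
# log2 of both sides, to q * log2(p) + p * log2(q) <= degree * log2(base), which is
# exactly the `upper_bound` comparison in the inner loop above.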
if __name__ == "__main__":
print(F'''{solution() = }''')
| 429 |
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)

        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
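
# Example (illustrative): Automaton(["what", "hat", "ver", "er"]).search_in("whatever")
# returns {"what": [0], "hat": [1], "ver": [5], "er": [6]}, i.e. the start index of
# every keyword occurrence, including keywords nested inside longer matches.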
if __name__ == "__main__":
import doctest
doctest.testmod()
| 429 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 510 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint to a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to convert a model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"""Loading model {model_type}""")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
UpperCAmelCase__ =self._tf_checkpoint
UpperCAmelCase__ =""
else:
UpperCAmelCase__ =self._tf_checkpoint
UpperCAmelCase__ =""
convert_transfo_xl_checkpoint_to_pytorch(
A_, self._config, self._pytorch_dump_output, A_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 510 | 1 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Exports a PyTorch BertModel as a TensorFlow 1.x checkpoint.

    Args:
        model: the BertModel to convert
        ckpt_dir: directory in which to save the TensorFlow checkpoint
        model_name: model name, used for the checkpoint file name
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"""bert/{name}"""

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"""Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}""")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
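
# Illustrative invocation (the script's file name and paths are placeholders):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpt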
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''', type=str, required=True, help='''model name e.g. bert-base-uncased''')
    parser.add_argument(
        '''--cache_dir''', type=str, default=None, required=False, help='''Directory containing pytorch model''')
    parser.add_argument('''--pytorch_model_path''', type=str, required=True, help='''/path/to/<pytorch-model-name>.bin''')
    parser.add_argument('''--tf_cache_dir''', type=str, required=True, help='''Directory in which to save tensorflow model''')
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
| 75 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
    print('''Googling.....''')
    url = F"""https://www.google.com/search?q={query}&num=100"""
    res = requests.get(
        url,
        headers={'''User-Agent''': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''yuRUbf'''})
            .find('''a''')
            .get('''href''')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''kCrYT'''})
            .find('''a''')
            .get('''href''')
        )['''url'''][0]
webbrowser.open(link)
| 75 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
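
# A short usage sketch: instantiating the config standalone (outside the
# package the relative imports above must be swapped for
# `from transformers import ViTMSNConfig`).
#
#   config = ViTMSNConfig(image_size=224, patch_size=16, hidden_size=768)
#   assert (config.image_size // config.patch_size) ** 2 == 196  # 14x14 patch grid -> 196 tokens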
| 704 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=["About 95 species are currently accepted ."]\n    >>> predictions=["About 95 you now get in ."]\n    >>> references=[["About 95 species are currently known ."]]\n    >>> wiki_split = datasets.load_metric("wiki_split")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
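
# A quick sanity check mirroring the docstring example above (a sketch; needs
# the `datasets`, `sacrebleu` and `sacremoses` dependencies installed):
#
#   sources = ["About 95 species are currently accepted ."]
#   predictions = ["About 95 you now get in ."]
#   references = [["About 95 species are currently known ."]]
#   print(compute_sari(sources, predictions, references))  # SARI ~ 21.8 per the docstring example
#   print(compute_em(predictions, references))             # exact match: 0.0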
| 332 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 101 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
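
# Example (hypothetical file names): plot inference memory against sequence
# length from a csv produced by the transformers benchmark scripts:
#
#   python plot_csv_file.py --csv_file inference_memory.csv --figure_png_file memory.png
#
# Add --is_time for latency plots and --plot_along_batch to sweep batch size
# instead of sequence length.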
| 51 | 0 |
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Count candidate Lychrel numbers below `limit` (Project Euler 55)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            # while-else: no palindrome reached within 50 iterations
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f'{solution() = }')
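
# Worked example: 349 is *not* Lychrel -- it reaches a palindrome in three
# reverse-and-add steps: 349 + 943 = 1292, 1292 + 2921 = 4213,
# 4213 + 3124 = 7337. 196, by contrast, never produces a palindrome within
# the 50-iteration cap, so it is counted by `solution()`.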
| 417 |
"""Combine short seq2seq examples into longer ones so each fits a token budget."""
import argparse
import shutil
from pathlib import Path

from tqdm import tqdm

from transformers import AutoTokenizer


def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
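
# Example (hypothetical paths): pack a seq2seq dataset so each training
# example holds as many concatenated documents as fit in 1024 BART tokens:
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed --max_seq_len 1024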
| 417 | 1 |
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
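
# A minimal sketch of how this mixin is consumed (hypothetical test class;
# the concrete feature extractor and its kwargs come from the real test file):
#
#   class Wav2Vec2FeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = Wav2Vec2FeatureExtractor
#       feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000}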
| 477 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)


RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
        # This check we did call the fake head request
        mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
| 477 | 1 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        # Create dummy data files in the <split>.<field> layout finetune_rag expects.
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
        if gpus > 0:
            testargs.append(f'--gpus={gpus}')
            if is_apex_available():
                testargs.append('--fp16')
        else:
            testargs.append('--gpus=0')
            testargs.append('--distributed_backend=ddp_cpu')
            testargs.append('--num_processes=2')

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, 'metrics.json')
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever='ray')
        self.assertGreaterEqual(result['test'][0]['test_avg_em'], 0.2)
| 720 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 648 | 0 |
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    # One extra relaxation pass: any further improvement implies a negative cycle.
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax every edge vertex_count - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE = int(input("Enter number of vertices: ").strip())
SCREAMING_SNAKE_CASE = int(input("Enter number of edges: ").strip())
SCREAMING_SNAKE_CASE = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
SCREAMING_SNAKE_CASE = {"src": src, "dst": dest, "weight": weight}
SCREAMING_SNAKE_CASE = int(input("\nEnter shortest path source:").strip())
SCREAMING_SNAKE_CASE = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
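
# Non-interactive usage sketch: a 3-vertex graph with edges 0->1 (w=4),
# 0->2 (w=1) and 2->1 (w=2); the shortest distances from vertex 0 are
# [0.0, 3.0, 1.0] because the path 0->2->1 undercuts the direct edge.
#
#   example_graph = [
#       {"src": 0, "dst": 1, "weight": 4},
#       {"src": 0, "dst": 2, "weight": 1},
#       {"src": 2, "dst": 1, "weight": 2},
#   ]
#   assert bellman_ford(example_graph, vertex_count=3, edge_count=3, src=0) == [0.0, 3.0, 1.0]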
| 579 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
SCREAMING_SNAKE_CASE = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
SCREAMING_SNAKE_CASE = spec.loader.load_module()
SCREAMING_SNAKE_CASE = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
SCREAMING_SNAKE_CASE = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
SCREAMING_SNAKE_CASE = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def snake_case__ ( ) -> Any:
UpperCAmelCase_ = []
for config_class in list(CONFIG_MAPPING.values() ):
UpperCAmelCase_ = False
# source code of `config_class`
UpperCAmelCase_ = inspect.getsource(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = _re_checkpoint.findall(__SCREAMING_SNAKE_CASE )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
UpperCAmelCase_ , UpperCAmelCase_ = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
UpperCAmelCase_ = f'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
UpperCAmelCase_ = True
break
UpperCAmelCase_ = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
UpperCAmelCase_ = "\n".join(sorted(__SCREAMING_SNAKE_CASE ) )
raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 579 | 1 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
lowerCamelCase__ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
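
# Example (hypothetical checkpoint layout): convert the aeslc PEGASUS
# checkpoint, letting the script infer the dataset name from the parent
# directory and default the save dir to pegasus/aeslc:
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc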
| 714 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n, found by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_prime_factor = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_prime_factor = i
            n //= i
        i += 1
    if n > 1:
        max_prime_factor = n
    return int(max_prime_factor)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 408 | 0 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def __lowercase ( _UpperCAmelCase = "laptop" ) -> DataFrame:
'''simple docstring'''
__lowercase = f'''https://www.amazon.in/laptop/s?k={product}'''
__lowercase = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
__lowercase = BeautifulSoup(requests.get(_UpperCAmelCase , headers=_UpperCAmelCase ).text )
# Initialize a Pandas dataframe with the column titles
__lowercase = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
__lowercase = item.ha.text
__lowercase = "https://www.amazon.in/" + item.ha.a["href"]
__lowercase = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
__lowercase = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
__lowercase = "Not available"
try:
__lowercase = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
__lowercase = ""
try:
__lowercase = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
__lowercase = float("nan" )
except AttributeError:
pass
__lowercase = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__lowercase = " "
__lowercase = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
    product = 'headphones'
get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
| 321 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
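
# A short composition sketch (assuming the package context): build a full
# BridgeTower config from explicit sub-configs instead of the defaults.
#
#   text_config = BridgeTowerTextConfig(vocab_size=50265, hidden_size=768)
#   vision_config = BridgeTowerVisionConfig(image_size=288, patch_size=16)
#   config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.to_dict()["vision_config"]["image_size"] == 288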
| 321 | 1 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__a: str = logging.get_logger(__name__)
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ : str = nn.functional.normalize(_snake_case )
lowercase__ : Any = nn.functional.normalize(_snake_case )
return torch.mm(_snake_case , normalized_text_embeds.t() )
class UpperCAmelCase ( a__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = CLIPConfig
SCREAMING_SNAKE_CASE = ["CLIPEncoderLayer"]
def __init__( self , __lowerCAmelCase ) -> List[Any]:
super().__init__(__lowerCAmelCase )
lowercase__ : Optional[int] = CLIPVisionModel(config.vision_config )
lowercase__ : Any = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__lowerCAmelCase )
lowercase__ : Tuple = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__lowerCAmelCase )
lowercase__ : Any = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__lowerCAmelCase )
lowercase__ : int = nn.Parameter(torch.ones(17 ) , requires_grad=__lowerCAmelCase )
lowercase__ : str = nn.Parameter(torch.ones(3 ) , requires_grad=__lowerCAmelCase )
@torch.no_grad()
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
lowercase__ : Union[str, Any] = self.vision_model(__lowerCAmelCase )[1] # pooled_output
lowercase__ : Union[str, Any] = self.visual_projection(__lowerCAmelCase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ : Optional[Any] = cosine_distance(__lowerCAmelCase , self.special_care_embeds ).cpu().float().numpy()
lowercase__ : Optional[int] = cosine_distance(__lowerCAmelCase , self.concept_embeds ).cpu().float().numpy()
lowercase__ : str = []
lowercase__ : Tuple = image_embeds.shape[0]
for i in range(__lowerCAmelCase ):
lowercase__ : List[Any] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
lowercase__ : List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
lowercase__ : str = special_cos_dist[i][concept_idx]
lowercase__ : List[Any] = self.special_care_embeds_weights[concept_idx].item()
lowercase__ : str = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
lowercase__ : Dict = 0.0_1
for concept_idx in range(len(cos_dist[0] ) ):
lowercase__ : Dict = cos_dist[i][concept_idx]
lowercase__ : Optional[Any] = self.concept_embeds_weights[concept_idx].item()
lowercase__ : Optional[int] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__lowerCAmelCase )
result.append(__lowerCAmelCase )
lowercase__ : Dict = [len(res['''bad_concepts'''] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
lowercase__ : Optional[int] = self.vision_model(__lowerCAmelCase )[1] # pooled_output
lowercase__ : int = self.visual_projection(__lowerCAmelCase )
lowercase__ : int = cosine_distance(__lowerCAmelCase , self.special_care_embeds )
lowercase__ : Optional[int] = cosine_distance(__lowerCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
lowercase__ : List[Any] = 0.0
lowercase__ : Optional[int] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
lowercase__ : Tuple = torch.any(special_scores > 0 , dim=1 )
lowercase__ : Any = special_care * 0.0_1
lowercase__ : List[str] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
lowercase__ : List[str] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
lowercase__ : Optional[Any] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
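Below is a minimal, self-contained sketch of the cosine thresholding that both forward passes above rely on; demo_cosine_filter is a hypothetical helper written for illustration, not part of the checker class.

import torch
from torch import nn


def demo_cosine_filter(image_embeds, concept_embeds, thresholds, adjustment=0.0):
    # cosine similarity between every image embedding and every concept embedding
    cos = torch.mm(nn.functional.normalize(image_embeds), nn.functional.normalize(concept_embeds).t())
    # an image is flagged as soon as any concept similarity exceeds its threshold
    return torch.any(cos - thresholds + adjustment > 0, dim=1)


demo_flags = demo_cosine_filter(torch.randn(2, 8), torch.randn(3, 8), torch.full((3,), 0.5))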
| 708 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a: Optional[int] = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Union[str, Any] = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: int = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: str = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
__a: List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 428 | 0 |
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {"vocab_file": "vocab.txt"}
__magic_name__ = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
__magic_name__ = {
"facebook/esm2_t6_8M_UR50D": 1024,
"facebook/esm2_t12_35M_UR50D": 1024,
}
def _lowerCAmelCase ( A__: Union[str, Any] ):
'''simple docstring'''
with open(A__ , '''r''' ) as f:
        lines = f.read().splitlines()
return [l.strip() for l in lines]
class lowercase ( A__ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
def __init__( self , _snake_case , _snake_case="<unk>" , _snake_case="<cls>" , _snake_case="<pad>" , _snake_case="<mask>" , _snake_case="<eos>" , **_snake_case , ) -> List[str]:
"""simple docstring"""
super().__init__(**_snake_case )
UpperCAmelCase = load_vocab_file(_snake_case )
UpperCAmelCase = dict(enumerate(self.all_tokens ) )
UpperCAmelCase = {tok: ind for ind, tok in enumerate(self.all_tokens )}
UpperCAmelCase = unk_token
UpperCAmelCase = cls_token
UpperCAmelCase = pad_token
UpperCAmelCase = mask_token
UpperCAmelCase = eos_token
UpperCAmelCase = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def snake_case_ ( self , _snake_case ) -> str:
"""simple docstring"""
return self._id_to_token.get(_snake_case , self.unk_token )
def snake_case_ ( self , _snake_case ) -> int:
"""simple docstring"""
return self._token_to_id.get(_snake_case , self._token_to_id.get(self.unk_token ) )
def snake_case_ ( self , _snake_case , **_snake_case ) -> Tuple:
"""simple docstring"""
return text.split()
def snake_case_ ( self , _snake_case=False ) -> int:
"""simple docstring"""
return len(self._id_to_token )
def snake_case_ ( self ) -> int:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens )}
def snake_case_ ( self , _snake_case ) -> int:
"""simple docstring"""
return self._token_to_id.get(_snake_case , self._token_to_id.get(self.unk_token ) )
def snake_case_ ( self , _snake_case ) -> str:
"""simple docstring"""
return self._id_to_token.get(_snake_case , self.unk_token )
def snake_case_ ( self , _snake_case , _snake_case = None ) -> List[int]:
"""simple docstring"""
UpperCAmelCase = [self.cls_token_id]
UpperCAmelCase = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def snake_case_ ( self , _snake_case , _snake_case = None , _snake_case = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
UpperCAmelCase = [1] + ([0] * len(_snake_case )) + [1]
if token_ids_a is not None:
mask += [0] * len(_snake_case ) + [1]
return mask
def snake_case_ ( self , _snake_case , _snake_case ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = os.path.join(_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(_snake_case , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def snake_case_ ( self ) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=_snake_case )
def snake_case_ ( self , _snake_case , _snake_case = False ) -> int:
"""simple docstring"""
return super()._add_tokens(_snake_case , special_tokens=_snake_case )
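As a hedged illustration of the special-token layout implemented by the tokenizer above: ESM wraps a single sequence as <cls> ... <eos> and chains a pair by terminating each segment with <eos>, since the vocabulary has no sep token. The ids below are placeholders, not the real vocabulary ids.

def demo_build_inputs(ids_a, ids_b=None, cls_id=0, eos_id=2):
    if ids_b is None:
        return [cls_id] + ids_a + [eos_id]
    # multiple inputs always end each segment with EOS; there is no sep token
    return [cls_id] + ids_a + [eos_id] + ids_b + [eos_id]


assert demo_build_inputs([5, 6]) == [0, 5, 6, 2]
assert demo_build_inputs([5], [7]) == [0, 5, 2, 7, 2]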
| 254 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BlenderbotConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = """gelu"""
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=False , _snake_case=99 , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=20 , _snake_case=2 , _snake_case=1 , _snake_case=0 , ) -> Dict:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase = prepare_blenderbot_inputs_dict(_snake_case , _snake_case , _snake_case )
return config, inputs_dict
def snake_case_ ( self , _snake_case , _snake_case ) -> Any:
"""simple docstring"""
UpperCAmelCase = TFBlenderbotModel(config=_snake_case ).get_decoder()
UpperCAmelCase = inputs_dict['''input_ids''']
UpperCAmelCase = input_ids[:1, :]
UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase = inputs_dict['''head_mask''']
UpperCAmelCase = 1
# first forward pass
UpperCAmelCase = model(_snake_case , attention_mask=_snake_case , head_mask=_snake_case , use_cache=_snake_case )
UpperCAmelCase , UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase = model(_snake_case , attention_mask=_snake_case )[0]
UpperCAmelCase = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case , _snake_case , rtol=1e-3 )
def _lowerCAmelCase ( A__: int , A__: Tuple , A__: Union[str, Any] , A__: Optional[int]=None , A__: Any=None , A__: int=None , A__: str=None , A__: int=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase = tf.cast(tf.math.not_equal(A__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = TFBlenderbotModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_snake_case )
def snake_case_ ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
@require_tokenizers
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["""My friends are cool but they eat too many carbs."""]
__SCREAMING_SNAKE_CASE = """facebook/blenderbot-400M-distill"""
@cached_property
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.tokenizer(self.src_text , return_tensors='''tf''' )
UpperCAmelCase = self.model.generate(
model_inputs.input_ids , )
UpperCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_snake_case )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
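The helper below is a dependency-free sketch of how prepare_blenderbot_inputs_dict above derives an attention mask from padding, assuming pad_token_id=0 and plain Python lists in place of tf tensors; it is illustrative, not the TF implementation.

def demo_attention_mask(input_ids, pad_token_id=0):
    # 1 for real tokens, 0 for padding, mirroring tf.math.not_equal above
    return [[int(tok != pad_token_id) for tok in row] for row in input_ids]


assert demo_attention_mask([[5, 7, 0, 0]]) == [[1, 1, 0, 0]]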
| 254 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class A_ ( UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''BlipImageProcessor'''
SCREAMING_SNAKE_CASE_ : Any = '''AutoTokenizer'''
def __init__( self : Tuple ,__A : Tuple ,__A : Union[str, Any] ,__A : Optional[Any] ) -> Tuple:
super().__init__(__A ,__A )
# add QFormer tokenizer
_lowercase = qformer_tokenizer
def __call__( self : List[Any] ,__A : ImageInput = None ,__A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,__A : bool = True ,__A : Union[bool, str, PaddingStrategy] = False ,__A : Union[bool, str, TruncationStrategy] = None ,__A : Optional[int] = None ,__A : int = 0 ,__A : Optional[int] = None ,__A : Optional[bool] = None ,__A : bool = False ,__A : bool = False ,__A : bool = False ,__A : bool = False ,__A : bool = False ,__A : bool = True ,__A : Optional[Union[str, TensorType]] = None ,**__A : Union[str, Any] ,) -> BatchFeature:
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
_lowercase = BatchFeature()
if text is not None:
_lowercase = self.tokenizer(
text=__A ,add_special_tokens=__A ,padding=__A ,truncation=__A ,max_length=__A ,stride=__A ,pad_to_multiple_of=__A ,return_attention_mask=__A ,return_overflowing_tokens=__A ,return_special_tokens_mask=__A ,return_offsets_mapping=__A ,return_token_type_ids=__A ,return_length=__A ,verbose=__A ,return_tensors=__A ,**__A ,)
encoding.update(__A )
_lowercase = self.qformer_tokenizer(
text=__A ,add_special_tokens=__A ,padding=__A ,truncation=__A ,max_length=__A ,stride=__A ,pad_to_multiple_of=__A ,return_attention_mask=__A ,return_overflowing_tokens=__A ,return_special_tokens_mask=__A ,return_offsets_mapping=__A ,return_token_type_ids=__A ,return_length=__A ,verbose=__A ,return_tensors=__A ,**__A ,)
_lowercase = qformer_text_encoding.pop('input_ids' )
_lowercase = qformer_text_encoding.pop('attention_mask' )
if images is not None:
_lowercase = self.image_processor(__A ,return_tensors=__A )
encoding.update(__A )
return encoding
def __UpperCAmelCase ( self : Optional[Any] ,*__A : str ,**__A : int ) -> List[Any]:
return self.tokenizer.batch_decode(*__A ,**__A )
def __UpperCAmelCase ( self : Any ,*__A : Tuple ,**__A : Union[str, Any] ) -> Dict:
return self.tokenizer.decode(*__A ,**__A )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __UpperCAmelCase ( self : Dict ) -> str:
_lowercase = self.tokenizer.model_input_names
_lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def __UpperCAmelCase ( self : int ,__A : Optional[int] ,**__A : Any ) -> List[Any]:
if os.path.isfile(__A ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(__A ,exist_ok=__A )
_lowercase = os.path.join(__A ,'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(__A )
return super().save_pretrained(__A ,**__A )
@classmethod
def __UpperCAmelCase ( cls : List[str] ,__A : List[Any] ,**__A : Optional[int] ) -> Union[str, Any]:
_lowercase = AutoTokenizer.from_pretrained(__A ,subfolder='qformer_tokenizer' )
_lowercase = cls._get_arguments_from_pretrained(__A ,**__A )
args.append(__A )
return cls(*__A ) | 535 |
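For illustration, the on-disk layout produced by save_pretrained above, using a hypothetical output path: the Q-Former tokenizer is written to its own subfolder so that from_pretrained can reload it separately via the qformer_tokenizer subfolder.

import os

save_directory = "./instructblip-processor"  # hypothetical path
qformer_dir = os.path.join(save_directory, "qformer_tokenizer")
# processor.save_pretrained(save_directory) would write the image processor and
# main tokenizer at the top level and the Q-Former tokenizer under qformer_dir
print(qformer_dir)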
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
snake_case = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : str ,__A : int = 101 ) -> Optional[int]:
_lowercase = length
def __len__( self : List[Any] ) -> Any:
return self.length
def __getitem__( self : List[Any] ,__A : Optional[int] ) -> int:
return i
class A_ :
"""simple docstring"""
def __call__( self : str ,__A : Union[str, Any] ) -> Any:
return {"input_ids": torch.tensor(__A ), "labels": torch.tensor(__A )}
class A_ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict ) -> Any:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
_lowercase = nn.Linear(120 ,80 )
def __UpperCAmelCase ( self : Dict ,__A : Dict ,__A : Any=None ) -> Tuple:
if labels is not None:
return torch.tensor(0.0 ,device=input_ids.device ), input_ids
else:
return input_ids
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@require_torch_neuroncore
def __UpperCAmelCase ( self : int ) -> Any:
_lowercase = F"""--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
_lowercase = self.get_auto_remove_tmp_dir()
_lowercase = F"""--output_dir {output_dir}""".split()
_lowercase = ['torchrun'] + distributed_args + args
execute_subprocess_async(__A ,env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class A_ ( UpperCAmelCase ):
"""simple docstring"""
@require_torch_multi_gpu
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
_lowercase = F"""--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
_lowercase = self.get_auto_remove_tmp_dir()
_lowercase = F"""--output_dir {output_dir}""".split()
_lowercase = ['torchrun'] + distributed_args + args
execute_subprocess_async(__A ,env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
    )
    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [1_0_1, 4_0, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    'Predictions and/or labels do not match expected results:\n  - predictions: '
                    F"""{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}""" )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)
        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)
        trainer.args.eval_accumulation_steps = None | 535 | 1 |
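A minimal sketch of the ordering property the compute_metrics callback above verifies: after a distributed gather, predictions must come back in dataset order. Plain lists stand in for the gathered tensors.

def demo_in_dataset_order(predictions):
    # a run "succeeds" when the gathered predictions are exactly 0..N-1
    return predictions == list(range(len(predictions)))


assert demo_in_dataset_order([0, 1, 2, 3])
assert not demo_in_dataset_order([1, 0, 2, 3])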
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A__ = logging.get_logger(__name__)
A__ = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class a ( __lowerCamelCase ):
__lowerCAmelCase : Any = """conditional_detr"""
__lowerCAmelCase : Union[str, Any] = ["""past_key_values"""]
__lowerCAmelCase : Dict = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self :Optional[int] ,__lowercase :Union[str, Any]=True ,__lowercase :int=None ,__lowercase :Optional[int]=3 ,__lowercase :Optional[int]=3_0_0 ,__lowercase :int=6 ,__lowercase :List[str]=2_0_4_8 ,__lowercase :Union[str, Any]=8 ,__lowercase :Dict=6 ,__lowercase :List[str]=2_0_4_8 ,__lowercase :int=8 ,__lowercase :List[Any]=0.0 ,__lowercase :Tuple=0.0 ,__lowercase :Optional[Any]=True ,__lowercase :List[Any]="relu" ,__lowercase :Any=2_5_6 ,__lowercase :Any=0.1 ,__lowercase :List[str]=0.0 ,__lowercase :Optional[int]=0.0 ,__lowercase :Tuple=0.02 ,__lowercase :List[Any]=1.0 ,__lowercase :List[Any]=False ,__lowercase :List[Any]="sine" ,__lowercase :Union[str, Any]="resnet50" ,__lowercase :Tuple=True ,__lowercase :List[Any]=False ,__lowercase :Optional[int]=2 ,__lowercase :List[Any]=5 ,__lowercase :int=2 ,__lowercase :Union[str, Any]=1 ,__lowercase :List[str]=1 ,__lowercase :Union[str, Any]=2 ,__lowercase :List[Any]=5 ,__lowercase :List[Any]=2 ,__lowercase :List[str]=0.25 ,**__lowercase :Dict ,):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
snake_case__ : Any = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__lowercase ,__lowercase ):
snake_case__ : Union[str, Any] = backbone_config.get('''model_type''' )
snake_case__ : Dict = CONFIG_MAPPING[backbone_model_type]
snake_case__ : Dict = config_class.from_dict(__lowercase )
snake_case__ : List[str] = use_timm_backbone
snake_case__ : List[str] = backbone_config
snake_case__ : Optional[Any] = num_channels
snake_case__ : int = num_queries
snake_case__ : int = d_model
snake_case__ : Optional[Any] = encoder_ffn_dim
snake_case__ : Optional[Any] = encoder_layers
snake_case__ : List[str] = encoder_attention_heads
snake_case__ : Optional[Any] = decoder_ffn_dim
snake_case__ : Tuple = decoder_layers
snake_case__ : Dict = decoder_attention_heads
snake_case__ : Dict = dropout
snake_case__ : Dict = attention_dropout
snake_case__ : str = activation_dropout
snake_case__ : Tuple = activation_function
snake_case__ : Any = init_std
snake_case__ : str = init_xavier_std
snake_case__ : List[str] = encoder_layerdrop
snake_case__ : int = decoder_layerdrop
snake_case__ : Any = encoder_layers
snake_case__ : str = auxiliary_loss
snake_case__ : str = position_embedding_type
snake_case__ : Optional[Any] = backbone
snake_case__ : Union[str, Any] = use_pretrained_backbone
snake_case__ : int = dilation
# Hungarian matcher
snake_case__ : Tuple = class_cost
snake_case__ : List[str] = bbox_cost
snake_case__ : Union[str, Any] = giou_cost
# Loss coefficients
snake_case__ : Optional[Any] = mask_loss_coefficient
snake_case__ : Union[str, Any] = dice_loss_coefficient
snake_case__ : Tuple = cls_loss_coefficient
snake_case__ : Tuple = bbox_loss_coefficient
snake_case__ : List[Any] = giou_loss_coefficient
snake_case__ : int = focal_alpha
super().__init__(is_encoder_decoder=__lowercase ,**__lowercase )
@property
def __lowerCamelCase ( self :List[str] ):
return self.encoder_attention_heads
@property
def __lowerCamelCase ( self :Any ):
return self.d_model
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
snake_case__ : Optional[int] = self.backbone_config.to_dict()
snake_case__ : Tuple = self.__class__.model_type
return output
class a ( __lowerCamelCase ):
__lowerCAmelCase : Optional[Any] = version.parse("""1.11""" )
@property
def __lowerCamelCase ( self :List[Any] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __lowerCamelCase ( self :str ):
return 1e-5
@property
def __lowerCamelCase ( self :Dict ):
return 1_2
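A hedged sketch of the backbone-config dispatch performed in __init__ above: when a plain dict is passed, its model_type selects the config class from a mapping, and a missing config falls back to a default ResNet exposing its last stage. The names below are stand-ins for CONFIG_MAPPING and the real config classes.

class DemoResNetConfig:
    # stand-in for the resnet entry in CONFIG_MAPPING
    def __init__(self, out_features=None):
        self.out_features = out_features


DEMO_CONFIG_MAPPING = {"resnet": DemoResNetConfig}


def demo_resolve_backbone(backbone_config=None):
    if backbone_config is None:
        # default: ResNet backbone exposing its last stage
        return DEMO_CONFIG_MAPPING["resnet"](out_features=["stage4"])
    if isinstance(backbone_config, dict):
        # a plain dict picks its config class via model_type
        config_class = DEMO_CONFIG_MAPPING[backbone_config["model_type"]]
        return config_class(out_features=backbone_config.get("out_features"))
    return backbone_config


assert demo_resolve_backbone({"model_type": "resnet", "out_features": ["stage3"]}).out_features == ["stage3"]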
| 252 |
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
A__ = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2'''])
parser.add_argument('''--model_name''', default='''roberta-large''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
A__ = parser.parse_args()
if args.model_type == "roberta":
A__ = RobertaForMaskedLM.from_pretrained(args.model_name)
A__ = '''roberta'''
elif args.model_type == "gpt2":
A__ = GPTaLMHeadModel.from_pretrained(args.model_name)
A__ = '''transformer'''
A__ = model.state_dict()
A__ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
A__ = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
A__ = f"""{prefix}.embeddings.{w}.weight"""
A__ = state_dict[param_name]
for w in ["weight", "bias"]:
A__ = f"""{prefix}.embeddings.LayerNorm.{w}"""
A__ = state_dict[param_name]
# Transformer Blocks #
A__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
A__ = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
A__ = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
A__ = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
A__ = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
A__ = state_dict[f"""lm_head.dense.{w}"""]
A__ = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
A__ = state_dict[f"""{prefix}.ln_f.{w}"""]
A__ = state_dict['''lm_head.weight''']
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
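A toy sketch of the layer-selection idea the script applies: teacher layers [0, 2, 4, 7, 9, 11] are renumbered 0..5 in the student state dict. The key template below is illustrative of the RoBERTa branch.

teacher_layers = [0, 2, 4, 7, 9, 11]
demo_key_map = {
    f"encoder.layer.{std_idx}": f"encoder.layer.{teacher_idx}"
    for std_idx, teacher_idx in enumerate(teacher_layers)
}
assert demo_key_map["encoder.layer.5"] == "encoder.layer.11"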
| 252 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 721 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
'''simple docstring'''
snake_case_ = ['pixel_values']
def __init__( self : Union[str, Any] , UpperCamelCase_ : bool = True , UpperCamelCase_ : int = 32 , UpperCamelCase_ : List[str]=PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , **UpperCamelCase_ : List[str] , ) -> None:
'''simple docstring'''
_lowercase : Any = do_resize
_lowercase : Any = do_rescale
_lowercase : Optional[Any] = size_divisor
_lowercase : Any = resample
super().__init__(**UpperCamelCase_ )
def __lowercase ( self : Dict , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[ChannelDimension] = None , **UpperCamelCase_ : List[str] ) -> np.ndarray:
'''simple docstring'''
_lowercase , _lowercase : Dict = get_image_size(UpperCamelCase_ )
# Rounds the height and width down to the closest multiple of size_divisor
_lowercase : int = height // size_divisor * size_divisor
_lowercase : List[Any] = width // size_divisor * size_divisor
_lowercase : Dict = resize(UpperCamelCase_ , (new_h, new_w) , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
return image
def __lowercase ( self : Union[str, Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : float , UpperCamelCase_ : Optional[ChannelDimension] = None , **UpperCamelCase_ : Dict ) -> np.ndarray:
'''simple docstring'''
return rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def __lowercase ( self : Optional[int] , UpperCamelCase_ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[Union[TensorType, str]] = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : int , ) -> BatchFeature:
'''simple docstring'''
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : List[str] = size_divisor if size_divisor is not None else self.size_divisor
_lowercase : Any = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('''size_divisor is required for resizing''' )
_lowercase : Tuple = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError('''Invalid image(s)''' )
# All transformations expect numpy arrays.
_lowercase : List[str] = [to_numpy_array(UpperCamelCase_ ) for img in images]
if do_resize:
_lowercase : Union[str, Any] = [self.resize(UpperCamelCase_ , size_divisor=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_rescale:
_lowercase : Optional[int] = [self.rescale(UpperCamelCase_ , scale=1 / 255 ) for image in images]
_lowercase : List[Any] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
_lowercase : List[Any] = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
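The resize rule above rounds both dimensions down to the nearest multiple of size_divisor before resizing; here is a dependency-free sketch of that rounding.

def demo_round_down(height, width, size_divisor=32):
    new_h = height // size_divisor * size_divisor
    new_w = width // size_divisor * size_divisor
    return new_h, new_w


assert demo_round_down(225, 97) == (224, 96)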
| 411 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 39 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class lowerCAmelCase ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = """convnextv2"""
def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.02 , lowerCAmelCase__=1e-12 , lowerCAmelCase__=0.0 , lowerCAmelCase__=224 , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> int:
super().__init__(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_stages
SCREAMING_SNAKE_CASE = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
SCREAMING_SNAKE_CASE = [3, 3, 9, 3] if depths is None else depths
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = ['stem'] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
| 247 | 0 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowercase__ , lowercase__ , lowercase__ = False, False, False
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = None
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = None
# Automatically constructed
lowerCamelCase__ = "dict"
lowerCamelCase__ = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
lowerCamelCase__ = field(default="""Audio""", init=lowercase, repr=lowercase )
def __call__( self ):
return self.pa_type
def A_ ( self , lowercase ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
if isinstance(lowercase , lowercase ):
return {"bytes": None, "path": value}
elif isinstance(lowercase , lowercase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_lowerCamelCase : int = BytesIO()
sf.write(lowercase , value['array'] , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
                # to convert PCM bytes to WAV bytes, you must at least know the sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
if value.get('bytes' ):
                # if we already have the PCM bytes, we don't need to read the file again (just use them!)
_lowerCamelCase : List[str] = np.frombuffer(value['bytes'] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
_lowerCamelCase : Any = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.floataa ) / 32767
_lowerCamelCase : Dict = BytesIO(bytes() )
sf.write(lowercase , lowercase , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def A_ ( self , lowercase , lowercase = None ):
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
_lowerCamelCase, _lowerCamelCase : Any = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None)
if path is None and file is None:
raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
_lowerCamelCase : str = xsplitext(lowercase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
if file is None:
_lowerCamelCase : List[str] = token_per_repo_id or {}
_lowerCamelCase : List[str] = path.split('::' )[-1]
try:
_lowerCamelCase : Optional[int] = string_to_dict(lowercase , config.HUB_DATASETS_URL )['repo_id']
_lowerCamelCase : List[Any] = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_lowerCamelCase : int = None
with xopen(lowercase , 'rb' , use_auth_token=lowercase ) as f:
_lowerCamelCase, _lowerCamelCase : Optional[Any] = sf.read(lowercase )
else:
_lowerCamelCase, _lowerCamelCase : List[str] = sf.read(lowercase )
_lowerCamelCase : Dict = array.T
if self.mono:
_lowerCamelCase : List[str] = librosa.to_mono(lowercase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_lowerCamelCase : List[str] = librosa.resample(lowercase , orig_sr=lowercase , target_sr=self.sampling_rate )
_lowerCamelCase : Any = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def A_ ( self ):
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
def A_ ( self , lowercase ):
if pa.types.is_string(storage.type ):
_lowerCamelCase : List[Any] = pa.array([None] * len(lowercase ) , type=pa.binary() )
_lowerCamelCase : Optional[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_lowerCamelCase : str = pa.array([None] * len(lowercase ) , type=pa.string() )
_lowerCamelCase : List[Any] = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
_lowerCamelCase : Tuple = pa.array([Audio().encode_example(lowercase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
_lowerCamelCase : Dict = storage.field('bytes' )
else:
_lowerCamelCase : Optional[Any] = pa.array([None] * len(lowercase ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
_lowerCamelCase : Tuple = storage.field('path' )
else:
_lowerCamelCase : Tuple = pa.array([None] * len(lowercase ) , type=pa.string() )
_lowerCamelCase : int = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
return array_cast(lowercase , self.pa_type )
def A_ ( self , lowercase ):
@no_op_if_value_is_null
def path_to_bytes(lowercase ):
with xopen(lowercase , 'rb' ) as f:
_lowerCamelCase : Dict = f.read()
return bytes_
_lowerCamelCase : str = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_lowerCamelCase : Union[str, Any] = pa.array(
[os.path.basename(lowercase ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
_lowerCamelCase : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(lowercase , self.pa_type ) | 492 |
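A minimal sketch of the PCM decoding path above: raw 16-bit samples are scaled by 1/32767 into float32 values in roughly [-1, 1]. The sample values are arbitrary.

import numpy as np

pcm = np.array([0, 16384, -32767], dtype=np.int16)
audio = pcm.astype(np.float32) / 32767
assert abs(audio[-1] + 1.0) < 1e-6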
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def _snake_case ( list_of_ints ):
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod() | 492 | 1 |
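A quick usage check for the radix sort above; it assumes non-negative integers, since digits are extracted with % RADIX.

assert _snake_case([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]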
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file) -> None:
    """Render a benchmark-results JSON file as a collapsible markdown section."""
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(F'''### Benchmark: {benchmark_file_name}''')
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = F''' {new_val:f}''' if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += F''' / {old_val:f}''' if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += F''' ({dif_val:f})''' if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
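A hedged usage sketch for the script above; the sample payload and temp-file names are hypothetical, but the payload matches the new/old/diff shape the function reads.

import json, os, tempfile

sample = {"benchmarks/bench_a": {"latency_ms": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
with tempfile.TemporaryDirectory() as d:
    src, dst = os.path.join(d, "results.json"), os.path.join(d, "results.md")
    with open(src, "w", encoding="utf-8") as f:
        json.dump(sample, f)
    format_json_to_md(src, dst)  # emits a <details> block with one table per benchmark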
| 71 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase ( lowerCamelCase__ ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_input_lengths
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =gelu_activation
lowerCamelCase_ =sinusoidal_embeddings
lowerCamelCase_ =causal
lowerCamelCase_ =asm
lowerCamelCase_ =n_langs
lowerCamelCase_ =vocab_size
lowerCamelCase_ =n_special
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =num_labels
lowerCamelCase_ =num_choices
lowerCamelCase_ =summary_type
lowerCamelCase_ =use_proj
lowerCamelCase_ =scope
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
if self.use_input_lengths:
lowerCamelCase_ =(
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs )
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float()
lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices )
lowerCamelCase_ =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase__ ( self ):
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, )
lowerCamelCase_ =model(
lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase )
((lowerCamelCase_), ) =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase )
lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_labels
lowerCamelCase_ =FlaubertForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.num_choices
lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowerCamelCase_ =model(
lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
(
(
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
),
) =config_and_inputs
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : List[Any] =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase : Tuple =(
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ):
"""simple docstring"""
lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
lowerCamelCase_ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase )
return inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@slow
@require_torch_gpu
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowerCamelCase_ =True
lowerCamelCase_ =model_class(config=lowerCAmelCase )
lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =torch.jit.trace(
lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) )
lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase )
loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase )[0]
lowerCamelCase_ =torch.Size((1, 11, 768) )
self.assertEqual(output.shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
| 676 | 0 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _A( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : str = PhobertTokenizer
UpperCamelCase : Any = False
def UpperCAmelCase_ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__A : Dict = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
__A : int = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
__A : Any = ['''#version: 0.2''', '''l à</w>''']
__A : List[Any] = {'''unk_token''': '''<unk>'''}
__A : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__A : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase__ ) )
def UpperCAmelCase_ ( self , **_A ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCAmelCase_ ( self , _A ):
__A : Union[str, Any] = '''Tôi là VinAI Research'''
__A : Optional[int] = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
return input_text, output_text
def UpperCAmelCase_ ( self ):
__A : Any = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__A : str = '''Tôi là VinAI Research'''
__A : Optional[Any] = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
__A : int = tokenizer.tokenize(UpperCamelCase__ )
print(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
__A : Any = tokens + [tokenizer.unk_token]
__A : List[str] = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
| 712 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : Tuple = tempfile.mkdtemp()
# fmt: off
__A : Union[str, Any] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A : Dict = dict(zip(_A , range(len(_A ) ) ) )
__A : int = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A : Optional[Any] = {'unk_token': '<unk>'}
__A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_A ) )
__A : Union[str, Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
__A : List[str] = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_A )
def UpperCAmelCase_ ( self , **_A ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
__A : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__A : Optional[int] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_tokenizer()
__A : Dict = self.get_rust_tokenizer()
__A : Optional[Any] = self.get_image_processor()
__A : Dict = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__A : Any = OwlViTProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__A : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : Tuple = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__A : int = self.get_image_processor(do_normalize=_A )
__A : int = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase_ ( self ):
__A : List[str] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : List[Any] = self.prepare_image_inputs()
__A : Any = image_processor(_A , return_tensors='np' )
__A : Tuple = processor(images=_A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase_ ( self ):
__A : Tuple = self.get_image_processor()
__A : int = self.get_tokenizer()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Union[str, Any] = 'lower newer'
__A : Any = processor(text=_A , return_tensors='np' )
__A : Dict = tokenizer(_A , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase_ ( self ):
__A : Optional[int] = self.get_image_processor()
__A : List[str] = self.get_tokenizer()
__A : Tuple = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Tuple = 'lower newer'
__A : Union[str, Any] = self.prepare_image_inputs()
__A : List[Any] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[int] = 'google/owlvit-base-patch32'
__A : str = OwlViTProcessor.from_pretrained(_A )
__A : Any = ['cat', 'nasa badge']
__A : List[Any] = processor(text=_A )
__A : Dict = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Tuple = 'google/owlvit-base-patch32'
__A : Any = OwlViTProcessor.from_pretrained(_A )
__A : int = [['cat', 'nasa badge'], ['person']]
__A : str = processor(text=_A )
__A : int = 16
__A : Optional[int] = len(_A )
__A : int = max([len(_A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : int = 'google/owlvit-base-patch32'
__A : List[str] = OwlViTProcessor.from_pretrained(_A )
__A : Tuple = ['cat', 'nasa badge']
__A : Dict = processor(text=_A )
__A : Tuple = 16
__A : str = inputs['input_ids']
__A : str = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase_ ( self ):
__A : Dict = self.get_image_processor()
__A : Optional[int] = self.get_tokenizer()
__A : Dict = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : Any = self.prepare_image_inputs()
__A : Tuple = self.prepare_image_inputs()
__A : Any = processor(images=_A , query_images=_A )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Optional[int] = OwlViTProcessor(tokenizer=_A , image_processor=_A )
__A : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Any = processor.batch_decode(_A )
__A : Union[str, Any] = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
| 77 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase_ = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
lowercase_ = {'allegro/herbert-base-cased': 5_14}
lowercase_ = {}
class __a ( __snake_case ):
lowerCamelCase : Dict =VOCAB_FILES_NAMES
lowerCamelCase : int =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Any =PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Any =HerbertTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase="</s>" , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , sep_token=UpperCAmelCase , **UpperCAmelCase , )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase_ = [self.cls_token_id]
lowerCAmelCase_ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase )) + [1]
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase_ = [self.sep_token_id]
lowerCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase ) | 552 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase_ = 16
lowercase_ = 32
def UpperCAmelCase ( _lowercase : Accelerator , _lowercase : int = 1_6 , _lowercase : str = "bert-base-cased" ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ = AutoTokenizer.from_pretrained(_lowercase )
lowerCAmelCase_ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(_lowercase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_lowercase : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return tokenizer.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
lowerCAmelCase_ = DataLoader(
tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
lowerCAmelCase_ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def UpperCAmelCase ( _lowercase : int , _lowercase : Any , _lowercase : Any , _lowercase : str ) -> List[Any]:
"""simple docstring"""
model.eval()
lowerCAmelCase_ = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ = model(**_lowercase )
lowerCAmelCase_ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
lowerCAmelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
lowerCAmelCase_ = metric.compute()
return eval_metric["accuracy"]
def UpperCAmelCase ( _lowercase : Dict , _lowercase : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ = config['''lr''']
lowerCAmelCase_ = int(config['''num_epochs'''] )
lowerCAmelCase_ = int(config['''seed'''] )
lowerCAmelCase_ = int(config['''batch_size'''] )
lowerCAmelCase_ = args.model_name_or_path
set_seed(_lowercase )
lowerCAmelCase_ , lowerCAmelCase_ = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
lowerCAmelCase_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
lowerCAmelCase_ = 1
lowerCAmelCase_ = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
lowerCAmelCase_ = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ = 0
lowerCAmelCase_ = evaluate.load('''glue''' , '''mrpc''' )
lowerCAmelCase_ = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase_ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase_ = args.resume_from_checkpoint.split('''epoch_''' )[1]
lowerCAmelCase_ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase_ = int(_lowercase ) + 1
lowerCAmelCase_ = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
accelerator.print('''resumed checkpoint performance:''' , _lowercase )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , F"""state_{starting_epoch-1}.json""" ) , '''r''' ) as f:
lowerCAmelCase_ = json.load(_lowercase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase_ = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
lowerCAmelCase_ = model(**_lowercase )
lowerCAmelCase_ = outputs.loss
lowerCAmelCase_ = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase_ = F"""epoch_{epoch}"""
lowerCAmelCase_ = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
lowerCAmelCase_ = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
lowerCAmelCase_ = accuracy
lowerCAmelCase_ = lr_scheduler.get_lr()[0]
lowerCAmelCase_ = optimizer.param_groups[0]['''lr''']
lowerCAmelCase_ = epoch
lowerCAmelCase_ = overall_step
accelerator.print(F"""epoch {epoch}:""" , _lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F"""state_{epoch}.json""" ) , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
def UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase_ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=_lowercase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , )
parser.add_argument(
'''--output_dir''' , type=_lowercase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=_lowercase , default=_lowercase , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=_lowercase , default=_lowercase , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=_lowercase , default=2 , help='''Number of train epochs.''' , )
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main() | 552 | 1 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
UpperCAmelCase_ : int = AutoencoderKL
UpperCAmelCase_ : Optional[Any] = 'sample'
UpperCAmelCase_ : Dict = 1E-2
@property
def _lowerCamelCase ( self ) -> List[str]:
__A = 4
__A = 3
__A = (32, 32)
__A = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase__ )
return {"sample": image}
@property
def _lowerCamelCase ( self ) -> List[str]:
return (3, 32, 32)
@property
def _lowerCamelCase ( self ) -> int:
return (3, 32, 32)
def _lowerCamelCase ( self ) -> List[Any]:
__A = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
__A = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ) -> Optional[int]:
pass
def _lowerCamelCase ( self ) -> List[Any]:
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _lowerCamelCase ( self ) -> int:
# enable deterministic behavior for gradient checkpointing
__A , __A = self.prepare_init_args_and_inputs_for_common()
__A = self.model_class(**lowercase__ )
model.to(lowercase__ )
assert not model.is_gradient_checkpointing and model.training
__A = model(**lowercase__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
__A = torch.randn_like(lowercase__ )
__A = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__A = self.model_class(**lowercase__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowercase__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__A = model_a(**lowercase__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
__A = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
__A = dict(model.named_parameters() )
__A = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def _lowerCamelCase ( self ) -> Tuple:
__A , __A = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(lowercase__ )
__A = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _lowerCamelCase ( self ) -> Optional[int]:
__A = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
__A = model.to(lowercase__ )
model.eval()
if torch_device == "mps":
__A = torch.manual_seed(0 )
else:
__A = torch.Generator(device=lowercase__ ).manual_seed(0 )
__A = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__A = image.to(lowercase__ )
with torch.no_grad():
__A = model(lowercase__ , sample_posterior=lowercase__ , generator=lowercase__ ).sample
__A = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__A = torch.tensor(
[
-4.00_78e-01,
-3.83_23e-04,
-1.26_81e-01,
-1.14_62e-01,
2.00_95e-01,
1.08_93e-01,
-8.82_47e-02,
-3.03_61e-01,
-9.86_44e-03,
] )
elif torch_device == "cpu":
__A = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
__A = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(lowercase__ , lowercase__ , rtol=1e-2 ) )
@slow
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self , lowercase__ , lowercase__ ) -> Tuple:
return F"""gaussian_noise_s={seed}_shape={'_'.join([str(lowercase__ ) for s in shape] )}.npy"""
def _lowerCamelCase ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self , lowercase__=0 , lowercase__=(4, 3, 512, 512) , lowercase__=False ) -> Optional[int]:
__A = torch.floataa if fpaa else torch.floataa
__A = torch.from_numpy(load_hf_numpy(self.get_file_format(lowercase__ , lowercase__ ) ) ).to(lowercase__ ).to(lowercase__ )
return image
def _lowerCamelCase ( self , lowercase__="CompVis/stable-diffusion-v1-4" , lowercase__=False ) -> List[Any]:
__A = "fp16" if fpaa else None
__A = torch.floataa if fpaa else torch.floataa
__A = AutoencoderKL.from_pretrained(
lowercase__ , subfolder="vae" , torch_dtype=lowercase__ , revision=lowercase__ , )
model.to(lowercase__ ).eval()
return model
def _lowerCamelCase ( self , lowercase__=0 ) -> int:
if torch_device == "mps":
return torch.manual_seed(lowercase__ )
return torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ) -> str:
__A = self.get_sd_vae_model()
__A = self.get_sd_image(lowercase__ )
__A = self.get_generator(lowercase__ )
with torch.no_grad():
__A = model(lowercase__ , generator=lowercase__ , sample_posterior=lowercase__ ).sample
assert sample.shape == image.shape
__A = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__A = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowercase__ , lowercase__ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def _lowerCamelCase ( self , lowercase__ , lowercase__ ) -> int:
__A = self.get_sd_vae_model(fpaa=lowercase__ )
__A = self.get_sd_image(lowercase__ , fpaa=lowercase__ )
__A = self.get_generator(lowercase__ )
with torch.no_grad():
__A = model(lowercase__ , generator=lowercase__ , sample_posterior=lowercase__ ).sample
assert sample.shape == image.shape
__A = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__A = torch.tensor(lowercase__ )
assert torch_all_close(lowercase__ , lowercase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ ) -> int:
__A = self.get_sd_vae_model()
__A = self.get_sd_image(lowercase__ )
with torch.no_grad():
__A = model(lowercase__ ).sample
assert sample.shape == image.shape
__A = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__A = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowercase__ , lowercase__ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def _lowerCamelCase ( self , lowercase__ , lowercase__ ) -> Optional[int]:
__A = self.get_sd_vae_model()
__A = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__A = model.decode(lowercase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__A = sample[-1, -2:, :2, -2:].flatten().cpu()
__A = torch.tensor(lowercase__ )
assert torch_all_close(lowercase__ , lowercase__ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def _lowerCamelCase ( self , lowercase__ , lowercase__ ) -> Dict:
__A = self.get_sd_vae_model(fpaa=lowercase__ )
__A = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) , fpaa=lowercase__ )
with torch.no_grad():
__A = model.decode(lowercase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__A = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__A = torch.tensor(lowercase__ )
assert torch_all_close(lowercase__ , lowercase__ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _lowerCamelCase ( self , lowercase__ ) -> str:
__A = self.get_sd_vae_model(fpaa=lowercase__ )
__A = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) , fpaa=lowercase__ )
with torch.no_grad():
__A = model.decode(lowercase__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__A = model.decode(lowercase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase__ , lowercase__ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _lowerCamelCase ( self , lowercase__ ) -> int:
__A = self.get_sd_vae_model()
__A = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__A = model.decode(lowercase__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__A = model.decode(lowercase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase__ , lowercase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def _lowerCamelCase ( self , lowercase__ , lowercase__ ) -> Optional[Any]:
__A = self.get_sd_vae_model()
__A = self.get_sd_image(lowercase__ )
__A = self.get_generator(lowercase__ )
with torch.no_grad():
__A = model.encode(lowercase__ ).latent_dist
__A = dist.sample(generator=lowercase__ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__A = sample[0, -1, -3:, -3:].flatten().cpu()
__A = torch.tensor(lowercase__ )
__A = 3e-3 if torch_device != "mps" else 1e-2
assert torch_all_close(lowercase__ , lowercase__ , atol=lowercase__ )
| 205 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , ):
'''simple docstring'''
__A = {}
if train_file is not None:
__A = [train_file]
if eval_file is not None:
__A = [eval_file]
if test_file is not None:
__A = [test_file]
__A = datasets.load_dataset("csv" , data_files=lowerCAmelCase__ )
__A = list(ds[list(files.keys() )[0]].features.keys() )
__A = features_name.pop(lowerCAmelCase__ )
__A = list(set(ds[list(files.keys() )[0]][label_name] ) )
__A = {label: i for i, label in enumerate(lowerCAmelCase__ )}
__A = tokenizer.model_input_names
__A = {}
if len(lowerCAmelCase__ ) == 1:
for k in files.keys():
__A = ds[k].map(
lambda lowerCAmelCase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length" ) , batched=lowerCAmelCase__ , )
elif len(lowerCAmelCase__ ) == 2:
for k in files.keys():
__A = ds[k].map(
lambda lowerCAmelCase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length" , ) , batched=lowerCAmelCase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__A = {k: v for k, v in ex.items() if k in input_names}
__A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__A = {k: v for k, v in ex.items() if k in input_names}
__A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__A = {k: v for k, v in ex.items() if k in input_names}
__A = labelaid[ex[label_name]]
yield (d, label)
__A = (
tf.data.Dataset.from_generator(
lowerCAmelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__A = (
tf.data.Dataset.from_generator(
lowerCAmelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__A = (
tf.data.Dataset.from_generator(
lowerCAmelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
snake_case_ : str =logging.getLogger(__name__)
@dataclass
class a__ :
UpperCAmelCase_ : int = field(metadata={'help': 'Which column contains the label'} )
UpperCAmelCase_ : str = field(default=lowerCAmelCase__ , metadata={'help': 'The path of the training file'} )
UpperCAmelCase_ : Optional[str] = field(default=lowerCAmelCase__ , metadata={'help': 'The path of the development file'} )
UpperCAmelCase_ : Optional[str] = field(default=lowerCAmelCase__ , metadata={'help': 'The path of the test file'} )
UpperCAmelCase_ : int = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCAmelCase_ : bool = field(
default=lowerCAmelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class a__ :
UpperCAmelCase_ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
UpperCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
UpperCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
UpperCAmelCase_ : bool = field(default=lowerCAmelCase__ , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCAmelCase_ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def UpperCAmelCase ( ):
'''simple docstring'''
__A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__A , __A , __A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
F"""16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__A , __A , __A , __A = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowerCAmelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowerCAmelCase__ ) , labelaid=lowerCAmelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowerCAmelCase__ ) -> Dict:
__A = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__A = TFTrainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=lowerCAmelCase__ , eval_dataset=lowerCAmelCase__ , compute_metrics=lowerCAmelCase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__A = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
__A = trainer.evaluate()
__A = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(lowerCAmelCase__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
results.update(lowerCAmelCase__ )
return results
if __name__ == "__main__":
main()
| 205 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given 2-D shape, as a nested Python list.

    `name` is accepted for signature compatibility with other test helpers and is unused.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
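# For example, floats_list((2, 3)) returns a 2x3 nested list of floats drawn from
# [0.0, scale); the tests below convert these lists to numpy arrays when a batched
# input is needed.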
class TvltFeatureExtractionTester(unittest.TestCase):
    """Builds extractor kwargs and dummy speech inputs for the TVLT feature extraction tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
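# Note: SequenceFeatureExtractionTestMixin (imported above) consumes the two helpers on
# this tester: its `feat_extract_dict` property calls `prepare_feat_extract_dict()` to
# build constructor kwargs, and the shared tests call `prepare_inputs_for_common()` to
# build raw inputs.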
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        # `mel_filters` is a float numpy array, so it is compared separately with
        # np.allclose while the remaining (scalar) entries use plain equality.
        mel_a = dict_first.pop("mel_filters")
        mel_a = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_a, mel_a))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        # Same rationale as above: the float `mel_filters` array needs np.allclose.
        mel_a = dict_first.pop("mel_filters")
        mel_a = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_a, mel_a))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
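# A minimal usage sketch outside the test harness (assuming a released TVLT checkpoint;
# the checkpoint name below is illustrative, and `raw_waveforms` is a list of 1-D arrays):
#
#     from transformers import TvltFeatureExtractor
#
#     feature_extractor = TvltFeatureExtractor.from_pretrained("ZinengTang/tvlt-base")
#     batch = feature_extractor(raw_waveforms, sampling_rate=44100, return_tensors="pt")
#     # batch.audio_values is a 4-D tensor: (batch, channels, time frames, feature_size)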
| 636 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search, or log softmax for each vocabulary token when using beam search.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """
    A list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] objects. Adds a `__call__` method that applies each
    processor in order to a `scores` tensor.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    r"""[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    r"""[`FlaxLogitsWarper`] that keeps the smallest set of tokens whose cumulative probability reaches `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    r"""[`FlaxLogitsWarper`] that keeps only the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
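
# Illustrative sketch (not part of the original module): what the two warpers
# above do to a toy batch of logits. `None` is passed for the unused
# `input_ids` argument.
if __name__ == "__main__":
    toy_logits = jnp.log(jnp.array([[0.05, 0.10, 0.15, 0.30, 0.40]]))  # (batch=1, vocab=5)
    print(FlaxTopKLogitsWarper(top_k=2)(None, toy_logits, cur_len=0))
    # -> all but the two largest logits are replaced by -inf
    print(FlaxTopPLogitsWarper(top_p=0.7)(None, toy_logits, cur_len=0))
    # -> keeps the smallest set of tokens whose cumulative probability reaches 0.7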
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that enforces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is
    reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] enforcing a minimum length by setting the EOS probability to 0."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts at `begin_index`."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores, cur_len: int):
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids, scores, cur_len):
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is longer than force_token_array, do nothing.
            lambda: scores,
            # Otherwise, a token may be forced at this position.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                lambda: _force_token(cur_len),
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that modifies the logits for Whisper timestamp-token generation."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
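
# Illustrative sketch (not part of the original module): chaining processors
# and warpers the way `generate` does. Token ids and vocabulary size are made
# up for the example.
if __name__ == "__main__":
    processors = FlaxLogitsProcessorList(
        [
            FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=2),
            FlaxTemperatureLogitsWarper(temperature=0.8),
            FlaxTopKLogitsWarper(top_k=50),
        ]
    )
    dummy_input_ids = jnp.zeros((1, 4), dtype=jnp.int32)  # a prompt of length 4
    dummy_scores = jnp.zeros((1, 32_000))  # logits over a 32k-token vocabulary
    dummy_scores = processors(dummy_input_ids, dummy_scores, cur_len=4)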
| 636 | 1 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    # MNIST files store sizes as big-endian 32-bit unsigned integers.
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
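
# Quick sanity check for `_dense_to_one_hot` (illustrative; the expected
# output below is derived directly from the definition above).
if __name__ == "__main__":
    print(_dense_to_one_hot(numpy.array([0, 2, 1]), num_classes=3))
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 1. 0.]]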
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be either `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into `[0, 1]`.
        The `seed` arg provides for convenient deterministic testing.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
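
# Illustrative sketch (not part of the original module) of the epoch
# bookkeeping in `next_batch`, on a tiny fake dataset. `reshape=False` avoids
# the 4-D image requirement; all values are made up.
if __name__ == "__main__":
    toy = _DataSet(
        numpy.arange(10).reshape(10, 1),
        numpy.arange(10),
        dtype=dtypes.uint8,
        reshape=False,
        seed=42,
    )
    for _ in range(3):
        _, batch_labels = toy.next_batch(4, shuffle=False)
        # the third call crosses the epoch boundary: 2 leftover + 2 new examples
        print(toy.epochs_completed, batch_labels)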
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
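
# Illustrative end-to-end usage (downloads roughly 11 MB from the mirror
# above on first run; the target directory is arbitrary).
if __name__ == "__main__":
    mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
    print(mnist.train.num_examples)  # 55000 = 60000 - 5000 validation images
    images, labels = mnist.train.next_batch(128)
    print(images.shape, labels.shape)  # (128, 784) (128, 10)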
| 123 |
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
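# Give the citation string its conventional name before `A_` is reassigned
# below; the metric class and decorator reference these names.
_CITATION = A_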
A_ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
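_DESCRIPTION = A_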
A_ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
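_KWARGS_DESCRIPTION = A_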
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
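
# Note (illustrative numbers): `conll_score` above is simply the arithmetic
# mean of the MUC, B-cubed and CEAFe F1 values, scaled to a percentage.
if __name__ == "__main__":
    muc_f1, bcub_f1, ceafe_f1 = 0.80, 0.70, 0.60
    print((muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100)  # 70.0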
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 123 | 1 |
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool the token embeddings, weighted by the attention mask, then project.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
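
# Illustrative usage sketch (not part of the original module) with toy token
# ids and randomly initialised weights. `transformerDimSize` must match the
# XLM-R hidden size (768 for the default config); `imageDimSize` is arbitrary.
if __name__ == "__main__":
    sketch_config = MCLIPConfig(transformerDimSize=768, imageDimSize=640, vocab_size=250_002)
    sketch_model = MultilingualCLIP(sketch_config).eval()
    input_ids = torch.tensor([[0, 10_000, 10_001, 2]])
    attention_mask = torch.ones_like(input_ids)
    with torch.no_grad():
        text_features, token_embeddings = sketch_model(input_ids, attention_mask)
    print(text_features.shape)  # torch.Size([1, 640])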
| 536 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a
    specified text prompt.
    """

    # Prefix text to help Transformer-XL and XLNet with short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        """Parse arguments and tokenize."""
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        """Complete the prompt(s) given as inputs."""
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
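
# Illustrative entry point (not part of the original module): this class is
# normally reached through the `pipeline()` factory.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    outputs = generator("Once upon a time,", max_new_tokens=20, num_return_sequences=2)
    print(outputs[0]["generated_text"])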
| 536 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
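
# Illustrative use in a test (invented example): any network call inside
# `offline()` fails fast with a ConnectionError.
def _example_offline_usage():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.ConnectionError):
            requests.get("https://huggingface.co")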
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """
    Returns the numerical id of the current `pytest-xdist` worker (under `pytest -n N`), or 0 if `pytest-xdist` isn't
    being used.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """
    Returns a port unique to this particular test-suite worker, to avoid port clashes between parallel runs.
    """
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
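
# Illustrative sketch of how the subprocess helper appears in a test module
# (the command and assertion are invented; such tests are typically also
# guarded with decorators like @slow or @require_torch).
def _example_subprocess_test():
    cmd = [sys.executable, "-c", "print('hello from a worker')"]
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    assert "hello from a worker" in result.stdout[0]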
| 720 |
def __lowercase ( __lowerCAmelCase : int ):
a__ = generate_pascal_triangle(__lowerCAmelCase )
for row_idx in range(__lowerCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = []
for current_row_idx in range(__lowerCAmelCase ):
a__ = populate_current_row(__lowerCAmelCase , __lowerCAmelCase )
triangle.append(__lowerCAmelCase )
return triangle
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int ):
a__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a__ , a__ = 1, 1
for current_col_idx in range(1 , __lowerCAmelCase ):
calculate_current_element(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return current_row
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
a__ = triangle[current_row_idx - 1][current_col_idx - 1]
a__ = triangle[current_row_idx - 1][current_col_idx]
a__ = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(1_5):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
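    # Quick sanity check (illustrative, not part of the original script): both
    # generators agree, e.g. generate_pascal_triangle(5)
    # == generate_pascal_triangle_optimized(5)
    # == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]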
| 657 | 0 |
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)


def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
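    # Note (illustrative): atbash is an involution, so atbash(atbash(s)) == s,
    # e.g. atbash("ABCDEFGH") == "ZYXWVUTS" and atbash("ZYXWVUTS") == "ABCDEFGH".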
| 313 | import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_: Optional[Any] = logging.get_logger(__name__)
A_: Union[str, Any] = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs, ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        '''simple docstring'''
        return self.d_model

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
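# Illustrative usage (relies only on the config class defined above):
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   config.num_attention_heads  # -> 8, resolved via `attribute_map`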
| 398 | 0 |
"""simple docstring"""
import re
from ..utils import cached_file
# docstyle-ignore
UpperCAmelCase__ ="\nHuman: <<task>>\n\nAssistant: "
UpperCAmelCase__ ="huggingface-tools/default-prompts"
UpperCAmelCase__ ={"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """simple docstring"""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name})
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
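# Illustrative call (the agent name is hypothetical): with no prompt given, the
# default prompts repo is used and the chat template is fetched and returned.
#   chat_template = download_prompt(None, agent_name="my-agent", mode="chat")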
| 714 |
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    """simple docstring"""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
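    # Worked value (illustrative): for "hello" vs "world" only "l" matches within
    # the search window, so jaro = (1/5 + 1/5 + 1/1) / 3 and, with no common
    # prefix, the call above prints 0.4666666666666666.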
| 442 | 0 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
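# Overview of the driver code below (descriptive comment): load the raw dataset,
# compute per-example statistics with `preprocess`, drop exact-hash duplicates and
# heuristic rejects with `filter`, optionally near-deduplicate with MinHash, then
# shard the result into JSON files and gzip each shard.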
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 560 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "poolformer"

    def __init__(self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0, depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0, hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1e-5, initializer_range=0.02, **kwargs, ) -> None:
        """simple docstring"""
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 2e-3
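# Illustrative usage: the defaults reproduce the "s12" layout referenced in the
# archive map above, e.g. PoolFormerConfig().depths == [2, 2, 6, 2] (sum == 12).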
| 560 | 1 |
'''simple docstring'''
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    '''simple docstring'''
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"{solution() = }")
| 705 | '''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_van'] = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 415 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''vocab.json'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
SCREAMING_SNAKE_CASE__ = {'''mgp-str''': 2_7}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs) -> None:
        """simple docstring"""
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.vocab)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """simple docstring"""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 301 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    '''simple docstring'''

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


# Constant name reconstructed from usage; the module exposes the supported decode formats.
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f'Format {format} is not supported.')

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 214 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_van'] = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 713 |
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    '''simple docstring'''
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
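    # Expected output of the call below (illustrative): "Hey fellow warriors"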
print(reverse_long_words('Hey wollef sroirraw')) | 267 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
lowercase_ = ["gpt2"]
lowercase_ = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123_123

            for max_length in [3, 5, 1_024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 11 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
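    # Illustrative CLI invocation via python-fire (file names hypothetical):
    #   python rouge_cli.py predictions.txt references.txt --save_path=metrics.json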
| 416 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__ : Any =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any ={
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs, ) -> Any:
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 558 | """simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
| 558 | 1 |
'''simple docstring'''
class SubArray:
    """simple docstring"""

    def __init__(self, arr):
        # we need a list, not a comma-separated string
        self.array = arr.split(",")

    def solve_sub_array(self):
        """simple docstring"""
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
lowercase__ = input("please input some numbers:")
lowercase__ = SubArray(whole_array)
lowercase__ = array.solve_sub_array()
print(("the results is:", re))
| 638 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowercase__ = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 638 | 1 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
A_ : Any = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
A_ : Dict = direct_transformers_import(PATH_TO_TRANSFORMERS)
A_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
A_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """simple docstring"""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f'config.{attribute}' in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"', modeling_source, )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Check whether the arguments in `__init__` of `config_class` are used in the corresponding modeling files."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check whether the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"

        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
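    # Hypothetical mini-demo (illustration only, not part of the original check):
    # the multi-line `getattr` regex used in `check_attribute_being_used` matches
    # a call that a code formatter split across lines. `hidden_size` and the toy
    # source string below are invented for this example.
    demo_source = 'x = getattr(\n    self.config,\n    "hidden_size",\n)'
    demo_pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
    assert re.search(demo_pattern, demo_source) is not None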
| 717 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate inductive reactance, frequency or inductance from the two given
    electrical properties, and return the name/value pair of the zero-valued
    argument in a Python dict. Uses X_L = 2 * pi * f * L.

    >>> ind_reactance(-35e-3, 1e3, 0)
    Traceback (most recent call last):
        ...
    ValueError: Inductance cannot be negative
    >>> ind_reactance(0, 10e3, 50)
    {'inductance': 0.0007957747154594767}
    >>> ind_reactance(35e-3, 0, 50)
    {'frequency': 227.36420441699332}
    >>> ind_reactance(35e-3, 1e3, 0)
    {'reactance': 219.91148575128552}
    """
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
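    # Illustrative usage (values chosen arbitrarily): with L = 10 mH and
    # f = 5 kHz, the missing reactance is 2 * pi * 5000 * 0.01 ~= 314.16 ohms.
    print(ind_reactance(10e-3, 5e3, 0))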
| 479 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Count the tile totals t <= t_limit that can form at least one and at most
    n_limit distinct hollow square laminae (an outer square minus a centred,
    same-parity square hole).
    """
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole width must have the same parity as the outer width
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"""{solution() = }""")
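    # Smaller, faster sanity run (parameters invented for illustration): count
    # tile totals t <= 10_000 that admit between 1 and 10 distinct laminae.
    print(F"""{solution(10_000, 10) = }""")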
| 540 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
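# Minimal, self-contained sketch (not part of the test suite) of the shape
# contract the tests above assert: a randomly initialised tiny SegFormer
# downsamples its input by an overall stride of 32. All hyper-parameter values
# here are invented for illustration.
if __name__ == "__main__":
    if is_torch_available():
        tiny_config = SegformerConfig(
            image_size=64,
            num_channels=3,
            num_encoder_blocks=4,
            depths=[1, 1, 1, 1],
            hidden_sizes=[8, 8, 16, 16],
            num_attention_heads=[1, 1, 2, 2],
        )
        tiny_model = SegformerModel(tiny_config).eval()
        with torch.no_grad():
            out = tiny_model(torch.randn(1, 3, 64, 64))
        print(out.last_hidden_state.shape)  # expected: torch.Size([1, 16, 2, 2])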
| 540 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
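# Illustrative sketch (not a test): a randomly initialised
# FlaxRobertaPreLayerNormForMaskedLM maps (batch, seq_len) token ids to
# (batch, seq_len, vocab_size) logits. The tiny config values are invented.
if __name__ == "__main__":
    if is_flax_available():
        tiny_config = RobertaPreLayerNormConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
        )
        tiny_model = FlaxRobertaPreLayerNormForMaskedLM(tiny_config)
        logits = tiny_model(np.ones((2, 7), dtype="i4"))[0]
        print(logits.shape)  # expected: (2, 7, 99)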
| 424 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
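# Usage sketch (illustrative): thanks to the `_LazyModule` indirection above,
# the heavy framework-specific modules are only imported on first attribute
# access, e.g.
#
#   from transformers.models.vision_encoder_decoder import VisionEncoderDecoderConfig
#
# pulls in only the configuration module; touching `VisionEncoderDecoderModel`
# is what triggers the torch import path.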
| 424 | 1 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
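# Standalone sketch (illustration, mirroring the parametrized case above):
# distributing 10 shards over at most 3 jobs yields contiguous ranges whose
# sizes differ by at most one.
if __name__ == "__main__":
    print(_distribute_shards(num_shards=10, max_num_jobs=3))
    # expected: [range(0, 4), range(4, 7), range(7, 10)]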
| 388 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dataset of random length (dependent on the stop probability `p_stop`).
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)

    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])

    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
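# Quick standalone sketch (illustrative, mirroring `test_skip_data_loader`
# above): a SkipDataLoader simply drops the first `skip_batches` batches.
if __name__ == "__main__":
    demo_loader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
    print([t.tolist() for t in demo_loader])  # expected: [[8, 9, 10, 11], [12, 13, 14, 15]]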
| 388 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(F'''{prefix}cls_token''', "beit.embeddings.cls_token"),
(F'''{prefix}patch_embed.proj.weight''', "beit.embeddings.patch_embeddings.projection.weight"),
(F'''{prefix}patch_embed.proj.bias''', "beit.embeddings.patch_embeddings.projection.bias"),
(F'''{prefix}pos_embed''', "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT weights into our BEiT structure.
    """
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
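# Example invocation (paths and the script filename are illustrative — use
# whatever this file is saved as):
#
#   python convert_dit_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base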
| 707 |
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transform a snake_case string to camelCase (or PascalCase if `use_pascal` is True).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
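    # Illustrative calls (mirroring the doctest examples above):
    print(snake_to_camel_case("some_random_string"))  # someRandomString
    print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString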
| 132 | 0 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
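# Sketch (not in the original script) of what rewrite_dict_keys does, using the
# toy vocab from the comment above extended with the special tokens the function
# expects:
#
#     rewrite_dict_keys({"le@@": 5, "tt@@": 6, "er": 7,
#                        "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3})
#     == {"le": 5, "tt": 6, "er</w>": 7,
#         "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}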
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
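# Once converted, the dump folder behaves like any transformers FSMT checkpoint.
# A minimal usage sketch (the path is illustrative):
#
#     from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#     tokenizer = FSMTTokenizer.from_pretrained("data/wmt19-ru-en")
#     model = FSMTForConditionalGeneration.from_pretrained("data/wmt19-ru-en")
#     batch = tokenizer(["Машинное обучение - это здорово"], return_tensors="pt")  # "Machine learning is great"
#     print(tokenizer.decode(model.generate(**batch)[0], skip_special_tokens=True))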
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    """Configuration class to store the configuration of a SEW-D model."""

    model_type = "sew-d"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
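if __name__ == "__main__":
    # Quick sketch, not part of the original file: the default conv_stride
    # (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) downsamples the raw waveform by
    # 5 * 2**6 = 320 overall, which is exactly what inputs_to_logits_ratio reports.
    assert SEWDConfig().inputs_to_logits_ratio == 320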
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a FocalNetConfig whose hyperparameters are derived from the model name."""
    depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False
    use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False
    use_layerscale = True if 'large' in model_name or 'huge' in model_name else False

    if 'large' in model_name or 'xlarge' in model_name or 'huge' in model_name:
        if 'fl3' in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif 'fl4' in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if 'tiny' in model_name or 'small' in model_name or 'base' in model_name:
        focal_windows = [3, 3, 3, 3]
        if 'lrf' in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if 'tiny' in model_name:
        embed_dim = 96
    elif 'small' in model_name:
        embed_dim = 96
    elif 'base' in model_name:
        embed_dim = 128
    elif 'large' in model_name:
        embed_dim = 192
    elif 'xlarge' in model_name:
        embed_dim = 256
    elif 'huge' in model_name:
        embed_dim = 352

    # set label information
    repo_id = 'huggingface/label-files'
    if 'large' in model_name or 'huge' in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
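# For example (derived from the rules above, as a sanity check): 'focalnet-tiny-lrf'
# yields depths=[2, 2, 6, 2], focal_levels=[3, 3, 3, 3], focal_windows=[3, 3, 3, 3]
# and embed_dim=96, with the ImageNet-1k label set.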
def rename_key(name):
    if 'patch_embed.proj' in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if 'patch_embed.norm' in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if 'layers' in name:
        name = 'encoder.' + name
    if 'encoder.layers' in name:
        name = name.replace('encoder.layers', 'encoder.stages')
    if 'downsample.proj' in name:
        name = name.replace('downsample.proj', 'downsample.projection')
    if 'blocks' in name:
        name = name.replace('blocks', 'layers')
    if 'modulation.f.weight' in name or 'modulation.f.bias' in name:
        name = name.replace('modulation.f', 'modulation.projection_in')
    if 'modulation.h.weight' in name or 'modulation.h.bias' in name:
        name = name.replace('modulation.h', 'modulation.projection_context')
    if 'modulation.proj.weight' in name or 'modulation.proj.bias' in name:
        name = name.replace('modulation.proj', 'modulation.projection_out')
    if name == 'norm.weight':
        name = 'layernorm.weight'
    if name == 'norm.bias':
        name = 'layernorm.bias'
    if 'head' in name:
        name = name.replace('head', 'classifier')
    else:
        name = 'focalnet.' + name
    return name
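# Example of the renaming above (a sketch, traced through the branches by hand):
#   'layers.0.blocks.0.modulation.f.weight'
#   -> 'focalnet.encoder.stages.0.layers.0.modulation.projection_in.weight'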
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original checkpoint's weights to our FocalNet structure."""
    # fmt: off
    model_name_to_url = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print('Checkpoint URL: ', checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    processor = BitImageProcessor(
        do_resize=True,
        size={'shortest_edge': 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors='pt')

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print('Predicted class:', model.config.id2label[predicted_class_idx])
    print('First values of logits:', outputs.logits[0, :3])

    if model_name == 'focalnet-tiny':
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == 'focalnet-tiny-lrf':
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == 'focalnet-small':
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == 'focalnet-small-lrf':
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == 'focalnet-base':
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == 'focalnet-base-lrf':
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(f'Saving model and processor of {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'Pushing model and processor of {model_name} to the hub...')
        model.push_to_hub(f'{model_name}')
        processor.push_to_hub(f'{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'microsoft/speecht5_asr': 1024,
'microsoft/speecht5_tts': 1024,
'microsoft/speecht5_vc': 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """Construct a SpeechT5 tokenizer, based on a character-level SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
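# Minimal usage sketch (not part of the original file; fetching the vocab from
# the hub requires network access):
#
#     tokenizer = SpeechT5Tokenizer.from_pretrained('microsoft/speecht5_tts')
#     ids = tokenizer('Hello world').input_ids  # char-level SentencePiece ids, eos appended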