"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
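
# A minimal sketch of the lazy-import idea used above: exported attributes are
# resolved on first access instead of at package import time. `DemoLazyModule`
# is a hypothetical stand-in added for illustration, not the real
# transformers._LazyModule implementation.
import importlib
import types


class DemoLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule_name = self._attr_to_module.get(attr)
        if submodule_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{submodule_name}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so subsequent lookups skip __getattr__
        return value


# usage (illustrative): sys.modules[__name__] = DemoLazyModule(__name__, _import_structure)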
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import math
def lowerCAmelCase_ ( lowercase_ : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[str] = []
__SCREAMING_SNAKE_CASE : Tuple = 2
__SCREAMING_SNAKE_CASE : Optional[int] = int(math.sqrt(_A ) ) # Size of every segment
__SCREAMING_SNAKE_CASE : List[str] = [True] * (end + 1)
__SCREAMING_SNAKE_CASE : Optional[Any] = []
while start <= end:
if temp[start] is True:
in_prime.append(_A )
for i in range(start * start , end + 1 , _A ):
__SCREAMING_SNAKE_CASE : Tuple = False
start += 1
prime += in_prime
__SCREAMING_SNAKE_CASE : List[Any] = end + 1
__SCREAMING_SNAKE_CASE : Optional[Any] = min(2 * end , _A )
while low <= n:
__SCREAMING_SNAKE_CASE : int = [True] * (high - low + 1)
for each in in_prime:
__SCREAMING_SNAKE_CASE : List[Any] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_A , high + 1 , _A ):
__SCREAMING_SNAKE_CASE : List[Any] = False
for j in range(len(_A ) ):
if temp[j] is True:
prime.append(j + low )
__SCREAMING_SNAKE_CASE : Optional[Any] = high + 1
__SCREAMING_SNAKE_CASE : Dict = min(high + end , _A )
return prime
print(sieve(10**6))
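
# Quick sanity check for the segmented sieve above (an illustrative addition,
# not part of the original script): compare against a naive primality test on
# a small range.
def _is_prime_naive(k: int) -> bool:
    if k < 2:
        return False
    return all(k % d != 0 for d in range(2, int(math.sqrt(k)) + 1))


assert sieve(100) == [k for k in range(2, 101) if _is_prime_naive(k)]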
"""simple docstring"""
_lowerCamelCase = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowerCamelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowerCamelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Naive O(n^2) scan: for each element, search the rest of the list."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same quadratic idea, written with enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Stack-based O(n) solution: traverse from the right, keeping a stack of
    candidates that may still become some earlier element's next greater value."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
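
# Illustrative trace of the stack-based pass (added for clarity, not part of
# the original script). Walking [2, 7, 3, 5, 4, 6, 8] right to left, each
# element pops smaller-or-equal candidates off the stack; whatever remains on
# top is its next greater element.
assert next_greatest_element([2, 7, 3, 5, 4, 6, 8]) == [7, 8, 5, 6, 6, 8, -1]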
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor

from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Only record leaf modules (plus conv/batch-norm layers explicitly).
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        # Trace both modules with the same input, then copy weights pairwise.
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
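
# Example invocation (illustrative; the script filename is an assumption):
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./resnet50-converted
# Omitting --model_name converts every entry in names_to_config.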
"""PyTorch dataset wrapper for SQuAD question answering."""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
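
# Usage sketch (illustrative; the paths and the tokenizer checkpoint are
# assumptions, not part of the original module):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   data_args = SquadDataTrainingArguments(data_dir="./squad", version_2_with_negative=False)
#   train_dataset = SquadDataset(data_args, tokenizer=tokenizer, mode="train")
#   train_dataset[0]  # dict with input_ids, attention_mask, token_type_ids, start/end positions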
"""Digit persistence: how many steps until a number collapses to one digit."""


def multiplicative_persistence(num: int) -> int:
    """Count steps of repeatedly multiplying the digits of num together."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Count steps of repeatedly summing the digits of num."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
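
# Worked example (added for illustration): 39 has multiplicative persistence 3,
# since 3*9 = 27, then 2*7 = 14, then 1*4 = 4, a single digit after 3 steps.
# Its additive persistence is 2: 3+9 = 12, then 1+2 = 3.
assert multiplicative_persistence(39) == 3
assert additive_persistence(39) == 2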
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_detr_config(model_name: str):
    # initialize the backbone config from the matching pretrained ResNet
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91

    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def prepare_img():
    # We verify the conversion on an image of cute cats
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load config and determine whether this is the panoptic variant
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
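
# Example invocation (illustrative; the script filename is an assumption):
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50 --push_to_hub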
"""Tests for the ESMFold protein-folding model."""
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.esm.modeling_esmfold import EsmForProteinFolding


class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve A.x = b by Jacobi iteration, given an initial guess and an
    iteration count. A must be square and strictly diagonally dominant."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if any diagonal entry does not dominate the rest of its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
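
# Minimal usage sketch (added for illustration, not part of the original
# script): solve the strictly diagonally dominant system
#   4x + y = 1
#   x + 3y = 2
# whose exact solution is x = 1/11 ≈ 0.0909, y = 7/11 ≈ 0.6364.
example_a = np.array([[4.0, 1.0], [1.0, 3.0]])
example_b = np.array([[1.0], [2.0]])
print(jacobi_iteration_method(example_a, example_b, [0, 0], iterations=25))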
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself,
    e.g. 5 -> 25 and 76 -> 5776. Compare the trailing digits one by one."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
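
# Quick examples (added for illustration): 5^2 = 25 and 76^2 = 5776 end in the
# original number, while 7^2 = 49 does not.
assert is_automorphic_number(5) is True
assert is_automorphic_number(76) is True
assert is_automorphic_number(7) is False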
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
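
# Example invocation (illustrative; the script filename and run id are made up):
#   python get_github_job_time.py --workflow_run_id 1234567890
# prints each job name with its duration in minutes, longest first.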
"""ViViT model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
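
# Usage sketch (illustrative): keyword names follow the __init__ signature
# above, e.g. VivitConfig(num_frames=16) builds a config whose .num_frames is
# 16 while every other attribute keeps its default.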
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : str=7 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=99 , UpperCAmelCase_ : Tuple=64 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Dict=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Optional[int]=512 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : List[Any]=None , ):
SCREAMING_SNAKE_CASE : Any = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : int = seq_length
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : Tuple = use_input_mask
SCREAMING_SNAKE_CASE : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : List[str] = use_labels
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Optional[int] = embedding_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : str = type_vocab_size
SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : int = num_labels
SCREAMING_SNAKE_CASE : int = num_choices
SCREAMING_SNAKE_CASE : Tuple = scope
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A ( self : Tuple ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
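    # MegatronBertForPreTraining trains two heads at once, so when labels are
    # requested the inputs need both the MLM `labels` tensor and a
    # `next_sentence_label` tensor; the zero tensors above are placeholders of
    # the right shape and dtype.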
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Build a long tensor on the current test device."""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
SCREAMING_SNAKE_CASE : Any = "nvidia/megatron-bert-uncased-345m"
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(os.environ["MYDIR"] , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = MegatronBertModel.from_pretrained(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.half()
SCREAMING_SNAKE_CASE : Optional[int] = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3 ):
for jj in range(3 ):
SCREAMING_SNAKE_CASE : Any = output[0, ii, jj]
SCREAMING_SNAKE_CASE : Dict = expected[3 * ii + jj]
SCREAMING_SNAKE_CASE : Any = "ii={} jj={} a={} b={}".format(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
self.assertTrue(math.isclose(UpperCAmelCase_ , UpperCAmelCase_ , rel_tol=UpperCAmelCase_ , abs_tol=UpperCAmelCase_ ) , msg=UpperCAmelCase_ )
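# The integration test above is skipped by default; as a sketch (the exact test
# path may differ in your checkout), it would be exercised with something like:
#   RUN_SLOW=1 pytest tests/models/megatron_bert -k "no_head"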
| 706 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
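    # The expected input is CoNLL-style: one token per line, space-separated
    # columns with the label in column `self.label_idx`, and blank lines or
    # "-DOCSTART-" markers between sentences. For example (CoNLL-2003 NER):
    #   EU B-ORG
    #   rejects O
    #   German B-MISC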
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples
    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1
    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
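# Minimal usage sketch (paths are illustrative, not shipped with this module):
#   task = NER(label_idx=-1)
#   examples = task.read_examples_from_file("data/conll2003", Split.train)
#   labels = task.get_labels("data/conll2003/labels.txt")
# Chunk and POS expose the same interface for chunking and part-of-speech data.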
| 488 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 34 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # one linear head per distribution parameter
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        r"""Shape of each individual event produced by the distributions this object constructs."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        r"""Number of event dimensions, i.e. length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        r"""A value in the support of the distribution, usable e.g. as a padding value."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        r"""Return the projection layer mapping inputs to the distribution parameters."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        r"""Converts raw projections to the right shape and domain of each parameter."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        r"""Helper mapping inputs to the positive orthant."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
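    # squareplus(x) = (x + sqrt(x^2 + 4)) / 2 maps any real input smoothly onto
    # (0, inf); it plays the same role as softplus for constraining scale-like
    # parameters to be positive, while avoiding exp/log and giving squareplus(0) = 1.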
class StudentTOutput(DistributionOutput):
    """Student-T distribution output class."""

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
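# Minimal usage sketch (batch and feature sizes are illustrative): project
# hidden states to Student-T parameters, then build a distribution to sample
# from or score against.
#
#   output = StudentTOutput(dim=1)
#   proj = output.get_parameter_projection(in_features=32)
#   distr_args = proj(torch.randn(8, 32))    # (df, loc, scale), each of shape (8,)
#   distr = output.distribution(distr_args)  # a torch.distributions.StudentT
#   nll = -distr.log_prob(torch.randn(8))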
class NormalOutput(DistributionOutput):
    """Normal distribution output class."""

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative Binomial distribution output class."""

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overwrites the parent method: the negative binomial is integer-valued, so it
    # cannot be rescaled with an affine transform; the parameters are scaled instead.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 34 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
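# replace_key only rewrites suffixes and a few fixed substrings: for example, a
# sufficiently deep key ending in ".model.1.bias" becomes "...conv1d_1.bias".
# The structural renames (downsample/upsample blocks, resnet blocks) are handled
# by the regex substitutions in fix_jukebox_keys below.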
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """
    Copy/paste/tweak the OpenAI checkpoint weights into our Jukebox structure.
    """
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
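# Example invocation (script and output folder names are illustrative):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted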
| 719 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond: rows of stars growing from 1 to n."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (pyramid)
def reverse_floyd(n):
    """Print the lower half of the diamond: rows of stars shrinking from n to 1."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(R"""| /\ | |- | |- |--| |\ /| |-""")
print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
| 525 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
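    # All components above are deliberately tiny (32/64 channels, a 5-layer text
    # encoder, a 5002-token vocab) so that the full pipeline can run on CPU in
    # the fast test suite.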
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
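# The 3x3 corner slice image[0, -3:, -3:, -1] acts as a cheap fingerprint of the
# generated image: comparing nine pixel values against a stored reference is
# enough to catch numerical regressions without shipping full reference images.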
| 313 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
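# Minimal usage sketch ("facebook/mbart-large-50-one-to-many-mmt" is the real
# checkpoint; the sentence is arbitrary):
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # input_ids start with the en_XX language-code token and end with </s>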
| 128 | 0 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find instances where a non-binary file is opened without an explicit UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        r"""Find print statements, ignoring prints inside comments and docstrings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
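    # For example, `with open(path) as f:` (no encoding, non-binary mode) is
    # flagged by _no_encoding_on_file_open, while `open(path, "rb")` and
    # `open(path, encoding="utf-8")` are not.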
| 12 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
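# With this pattern, `import transformers.models.roberta_prelayernorm` stays cheap:
# the module object is swapped for a _LazyModule, and the heavy torch/TF/Flax
# modeling files are only imported when one of the names above is first accessed.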
| 12 | 1 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    r"""
    Constructs a SAM processor which wraps a SAM image processor into a single processor.
    """

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Uses [`SamImageProcessor.__call__`] to prepare image(s) for the model, and also prepares 2D points and
        bounding boxes for the model if they are provided.
        """
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor
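    # Typical call (image source and coordinates are illustrative):
    #   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    #   encoding = processor(images=image, input_points=[[[450, 600]]], return_tensors="pt")
    # The encoding then holds pixel_values plus resized "input_points" ready for SamModel.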
    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor
def __lowerCamelCase ( self , lowercase__ , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = max([point.shape[0] for point in input_points] )
SCREAMING_SNAKE_CASE_ : Any = []
for i, point in enumerate(lowercase__ ):
if point.shape[0] != expected_nb_points:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
SCREAMING_SNAKE_CASE_ : List[Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(lowercase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = processed_input_points
return input_points, input_labels
def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Any = original_size
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processor._get_preprocess_shape(lowercase__ , longest_edge=lowercase__ )
SCREAMING_SNAKE_CASE_ : int = deepcopy(lowercase__ ).astype(lowercase__ )
if is_bounding_box:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = coords.reshape(-1 , 2 , 2 )
SCREAMING_SNAKE_CASE_ : Any = coords[..., 0] * (new_w / old_w)
SCREAMING_SNAKE_CASE_ : Optional[int] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
SCREAMING_SNAKE_CASE_ : Dict = coords.reshape(-1 , 4 )
return coords
def __lowerCamelCase ( self , lowercase__=None , lowercase__=None , lowercase__=None , ):
"""simple docstring"""
if input_points is not None:
if hasattr(lowercase__ , "numpy" ): # Checks for TF or Torch tensor
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_points.numpy().tolist()
if not isinstance(lowercase__ , lowercase__ ) or not isinstance(input_points[0] , lowercase__ ):
raise ValueError("Input points must be a list of list of floating points." )
SCREAMING_SNAKE_CASE_ : str = [np.array(lowercase__ ) for input_point in input_points]
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
if input_labels is not None:
if hasattr(lowercase__ , "numpy" ):
SCREAMING_SNAKE_CASE_ : Any = input_labels.numpy().tolist()
if not isinstance(lowercase__ , lowercase__ ) or not isinstance(input_labels[0] , lowercase__ ):
raise ValueError("Input labels must be a list of list integers." )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [np.array(lowercase__ ) for label in input_labels]
else:
SCREAMING_SNAKE_CASE_ : Dict = None
if input_boxes is not None:
if hasattr(lowercase__ , "numpy" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = input_boxes.numpy().tolist()
if (
not isinstance(lowercase__ , lowercase__ )
or not isinstance(input_boxes[0] , lowercase__ )
or not isinstance(input_boxes[0][0] , lowercase__ )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
SCREAMING_SNAKE_CASE_ : List[Any] = [np.array(lowercase__ ).astype(np.floataa ) for box in input_boxes]
else:
SCREAMING_SNAKE_CASE_ : str = None
return input_points, input_labels, input_boxes
@property
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase__ ) )
def __lowerCamelCase ( self , *lowercase__ , **lowercase__ ):
"""simple docstring"""
return self.image_processor.post_process_masks(*lowercase__ , **lowercase__ )
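# --- Illustrative sketch (not part of the original sample) ---
# The processor above rescales prompt coordinates to the image processor's
# longest-edge target size and pads ragged point lists with a -10 sentinel.
# A self-contained sketch of the rescaling step, assuming the usual SAM
# convention of (x, y) points and longest-edge-1024 resizing:
import numpy as np

def rescale_points(points, original_size, longest_edge=1024):
    """Map (x, y) coordinates from `original_size` (h, w) onto the resized input."""
    old_h, old_w = original_size
    scale = longest_edge / max(old_h, old_w)
    # same rounding as a longest-edge resize of the image itself
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    points = np.asarray(points, dtype=np.float64).copy()
    points[..., 0] *= new_w / old_w  # x coordinates
    points[..., 1] *= new_h / old_h  # y coordinates
    return points

# e.g. rescale_points([[320.0, 240.0]], (480, 640)) -> [[512.0, 384.0]]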
| 421 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
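# --- Illustrative sketch (not part of the original sample) ---
# The module above relies on `_LazyModule`: everything in `_import_structure`
# is resolved only when first accessed, so optional backends (torch/tf/flax)
# are never imported eagerly. A minimal standalone version of the same idea,
# using only the standard library:
import importlib
import types

class LazyModule(types.ModuleType):
    """Defer submodule imports until an attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        try:
            module_name = self._attr_to_module[attr]
        except KeyError:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{module_name}"), attr)
        setattr(self, attr, value)  # cache so __getattr__ only runs once per name
        return value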
| 421 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _snake_case :
def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[str]=13 ,SCREAMING_SNAKE_CASE__ : Tuple=30 ,SCREAMING_SNAKE_CASE__ : List[str]=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=3 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=32 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=5 ,SCREAMING_SNAKE_CASE__ : int=4 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=37 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : List[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Tuple=0.1 ,SCREAMING_SNAKE_CASE__ : List[str]=10 ,SCREAMING_SNAKE_CASE__ : Any=0.02 ,SCREAMING_SNAKE_CASE__ : List[str]=None ,SCREAMING_SNAKE_CASE__ : List[str]=2 ,):
SCREAMING_SNAKE_CASE:int = parent
SCREAMING_SNAKE_CASE:Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE:Dict = image_size
SCREAMING_SNAKE_CASE:int = patch_size
SCREAMING_SNAKE_CASE:Any = num_channels
SCREAMING_SNAKE_CASE:List[Any] = is_training
SCREAMING_SNAKE_CASE:Optional[Any] = use_labels
SCREAMING_SNAKE_CASE:str = hidden_size
SCREAMING_SNAKE_CASE:int = num_hidden_layers
SCREAMING_SNAKE_CASE:Tuple = num_attention_heads
SCREAMING_SNAKE_CASE:Tuple = intermediate_size
SCREAMING_SNAKE_CASE:str = hidden_act
SCREAMING_SNAKE_CASE:Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE:Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE:str = type_sequence_label_size
SCREAMING_SNAKE_CASE:Optional[int] = initializer_range
SCREAMING_SNAKE_CASE:Optional[int] = scope
SCREAMING_SNAKE_CASE:str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE:List[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE:Optional[int] = num_patches + 1
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE:str = None
if self.use_labels:
SCREAMING_SNAKE_CASE:Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE:Tuple = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Optional[Any] ):
return ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=UpperCamelCase__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE:Optional[Any] = ViTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE:int = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ):
SCREAMING_SNAKE_CASE:List[Any] = ViTForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE:Any = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE:Optional[Any] = 1
SCREAMING_SNAKE_CASE:str = ViTForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE:Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE:Dict = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict ):
SCREAMING_SNAKE_CASE:str = self.type_sequence_label_size
SCREAMING_SNAKE_CASE:str = ViTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE:Any = model(UpperCamelCase__ ,labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE:Union[str, Any] = 1
SCREAMING_SNAKE_CASE:int = ViTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE:Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE:List[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:List[Any] = self.prepare_config_and_inputs()
(
SCREAMING_SNAKE_CASE
):Union[str, Any] = config_and_inputs
SCREAMING_SNAKE_CASE:Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_A : List[str] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_A : Tuple = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
_A : Optional[Any] = True
_A : Dict = False
_A : Union[str, Any] = False
_A : Tuple = False
def __UpperCamelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE:Union[str, Any] = ViTModelTester(self )
SCREAMING_SNAKE_CASE:Optional[Any] = ConfigTester(self ,config_class=UpperCamelCase__ ,has_text_modality=UpperCamelCase__ ,hidden_size=37 )
def __UpperCamelCase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __UpperCamelCase ( self : Dict ):
pass
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE:str = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
SCREAMING_SNAKE_CASE:int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ ,nn.Linear ) )
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE:Optional[int] = model_class(UpperCamelCase__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE:Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE:Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,UpperCamelCase__ )
def __UpperCamelCase ( self : Dict ):
SCREAMING_SNAKE_CASE:int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __UpperCamelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE:List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def __UpperCamelCase ( self : Any ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE:Tuple = ViTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def A_ ( ):
SCREAMING_SNAKE_CASE:Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : Optional[int] ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE:Any = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE:List[str] = self.default_image_processor
SCREAMING_SNAKE_CASE:Dict = prepare_img()
SCREAMING_SNAKE_CASE:Dict = image_processor(images=UpperCamelCase__ ,return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE:Optional[int] = model(**UpperCamelCase__ )
# verify the logits
SCREAMING_SNAKE_CASE:Any = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,UpperCamelCase__ )
SCREAMING_SNAKE_CASE:Optional[int] = torch.tensor([-0.2_744, 0.8_215, -0.0_836] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,UpperCamelCase__ ,atol=1e-4 ) )
@slow
def __UpperCamelCase ( self : Any ):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # which allows interpolating the pre-trained position embeddings so the model
        # can be used at higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher-resolution images.
SCREAMING_SNAKE_CASE:Optional[Any] = ViTModel.from_pretrained("facebook/dino-vits8" ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE:List[str] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" ,size=480 )
SCREAMING_SNAKE_CASE:Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE:int = image_processor(images=UpperCamelCase__ ,return_tensors="pt" )
SCREAMING_SNAKE_CASE:str = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE:int = model(UpperCamelCase__ ,interpolate_pos_encoding=UpperCamelCase__ )
# verify the logits
SCREAMING_SNAKE_CASE:Dict = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape ,UpperCamelCase__ )
SCREAMING_SNAKE_CASE:int = torch.tensor(
[[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,UpperCamelCase__ ,atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __UpperCamelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE:Any = ViTModel.from_pretrained("facebook/dino-vits8" ,torch_dtype=torch.floataa ,device_map="auto" )
SCREAMING_SNAKE_CASE:Any = self.default_image_processor
SCREAMING_SNAKE_CASE:Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE:Tuple = image_processor(images=UpperCamelCase__ ,return_tensors="pt" )
SCREAMING_SNAKE_CASE:Dict = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE:List[Any] = model(UpperCamelCase__ )
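# --- Illustrative sketch (not part of the original sample) ---
# `interpolate_pos_encoding=True` in the test above resamples the pre-trained
# patch position embeddings to the new grid size. A minimal reproduction of
# that resampling, assuming a ViT-style layout of [CLS] + one embedding per
# patch on a square grid:
import math
import torch
import torch.nn.functional as F

def interpolate_pos_embed(pos_embed, new_num_patches):
    """pos_embed: (1, 1 + old_num_patches, dim) with a leading CLS embedding."""
    cls_embed, patch_embed = pos_embed[:, :1], pos_embed[:, 1:]
    dim = pos_embed.shape[-1]
    old_grid = int(math.sqrt(patch_embed.shape[1]))
    new_grid = int(math.sqrt(new_num_patches))
    patch_embed = patch_embed.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
    patch_embed = F.interpolate(patch_embed, size=(new_grid, new_grid), mode="bicubic", align_corners=False)
    patch_embed = patch_embed.permute(0, 2, 3, 1).reshape(1, new_grid * new_grid, dim)
    return torch.cat([cls_embed, patch_embed], dim=1)

# e.g. DINO ViT-S/8 at 480x480 resolution gives a 60x60 patch grid, i.e.
# 1 + 3600 = 3601 positions -- matching the (1, 3601, 384) shape asserted above.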
| 711 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _snake_case ( _a , unittest.TestCase ):
_A : str = KandinskyVaaInpaintPipeline
_A : Tuple = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
_A : List[str] = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
_A : Optional[Any] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_A : int = False
@property
def __UpperCamelCase ( self : Any ):
return 32
@property
def __UpperCamelCase ( self : Tuple ):
return 32
@property
def __UpperCamelCase ( self : Union[str, Any] ):
return self.time_input_dim
@property
def __UpperCamelCase ( self : Any ):
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : Any ):
return 100
@property
def __UpperCamelCase ( self : Any ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE:Optional[Any] = {
"in_channels": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
SCREAMING_SNAKE_CASE:Dict = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__ )
return model
@property
def __UpperCamelCase ( self : Dict ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self : int ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE:str = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE:Tuple = self.dummy_unet
SCREAMING_SNAKE_CASE:Union[str, Any] = self.dummy_movq
SCREAMING_SNAKE_CASE:Any = DDIMScheduler(
num_train_timesteps=1_000 ,beta_schedule="linear" ,beta_start=0.00_085 ,beta_end=0.012 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,prediction_type="epsilon" ,thresholding=SCREAMING_SNAKE_CASE__ ,)
SCREAMING_SNAKE_CASE:Any = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCamelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ):
SCREAMING_SNAKE_CASE:Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Any = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE__ )
# create init_image
SCREAMING_SNAKE_CASE:Any = floats_tensor((1, 3, 64, 64) ,rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Union[str, Any] = image.cpu().permute(0 ,2 ,3 ,1 )[0]
SCREAMING_SNAKE_CASE:Union[str, Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert("RGB" ).resize((256, 256) )
# create mask
SCREAMING_SNAKE_CASE:int = np.ones((64, 64) ,dtype=np.floataa )
SCREAMING_SNAKE_CASE:Optional[Any] = 0
if str(SCREAMING_SNAKE_CASE__ ).startswith("mps" ):
SCREAMING_SNAKE_CASE:Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE:List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Tuple = {
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def __UpperCamelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE:List[Any] = "cpu"
SCREAMING_SNAKE_CASE:Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE:List[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:int = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:str = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE:List[Any] = output.images
SCREAMING_SNAKE_CASE:int = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ,return_dict=SCREAMING_SNAKE_CASE__ ,)[0]
SCREAMING_SNAKE_CASE:Union[str, Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE:Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE:Union[str, Any] = np.array(
[0.50_775_903, 0.49_527_195, 0.48_824_543, 0.50_192_237, 0.48_644_906, 0.49_373_814, 0.4_780_598, 0.47_234_827, 0.48_327_848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def __UpperCamelCase ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def __UpperCamelCase ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" )
SCREAMING_SNAKE_CASE:str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
SCREAMING_SNAKE_CASE:List[Any] = np.ones((768, 768) ,dtype=np.floataa )
SCREAMING_SNAKE_CASE:Union[str, Any] = 0
SCREAMING_SNAKE_CASE:Optional[int] = "a hat"
SCREAMING_SNAKE_CASE:Optional[int] = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" ,torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:Optional[int] = KandinskyVaaInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder-inpaint" ,torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE:List[str] = pipeline.to(SCREAMING_SNAKE_CASE__ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:List[str] = pipe_prior(
SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5 ,negative_prompt="" ,).to_tuple()
SCREAMING_SNAKE_CASE:Dict = pipeline(
image=SCREAMING_SNAKE_CASE__ ,mask_image=SCREAMING_SNAKE_CASE__ ,image_embeds=SCREAMING_SNAKE_CASE__ ,negative_image_embeds=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=100 ,height=768 ,width=768 ,output_type="np" ,)
SCREAMING_SNAKE_CASE:List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
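# --- Illustrative sketch (not part of the original sample) ---
# The slow test above builds a single-channel float mask where, in this
# pipeline revision, 1.0 marks pixels to keep and 0.0 marks the region to be
# repainted with the prompt. A sketch of that mask preparation (the exact
# zeroed band is an assumption based on the upstream version of the test):
import numpy as np

mask = np.ones((768, 768), dtype=np.float32)  # keep everything ...
mask[:250, 250:-250] = 0.0  # ... except a band above the subject, to be repainted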
| 465 | 0 |
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)
def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)
def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
    print(f"{solution() = }")
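    # --- Illustrative checks (not part of the original sample) ---
    # This is Project Euler problem 5: lcm(1, ..., n) is the smallest number
    # evenly divisible by every integer from 1 to n.
    assert solution(10) == 2520
    assert solution(20) == 232792560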
| 515 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **calc_rouge_kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **calc_rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
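# --- Illustrative usage (not part of the original sample) ---
# `fire` turns the function's parameters into CLI arguments; assuming the file
# is saved as rouge_cli.py and the inputs are newline-separated text files:
#
#   python rouge_cli.py preds.txt refs.txt --save_path rouge.json
#
# Any extra flags (e.g. --rouge_keys) are forwarded to `calculate_rouge`
# through **calc_rouge_kwargs.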
| 515 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'spm_char.model'}
lowerCAmelCase_ = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
lowerCAmelCase_ = {
'microsoft/speecht5_asr': 10_24,
'microsoft/speecht5_tts': 10_24,
'microsoft/speecht5_vc': 10_24,
}
class _A ( lowercase_ ):
_UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
_UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Any = ['''input_ids''', '''attention_mask''']
def __init__( self : int , _A : Optional[int] , _A : Optional[Any]="<s>" , _A : List[str]="</s>" , _A : Any="<unk>" , _A : Optional[int]="<pad>" , _A : Optional[Dict[str, Any]] = None , **_A : Optional[int] , ) -> int:
"""simple docstring"""
lowercase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowercase : str = vocab_file
lowercase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
def __a ( self : Tuple ) -> Dict:
"""simple docstring"""
return self.sp_model.get_piece_size()
def __a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = self.__dict__.copy()
lowercase : Optional[Any] = None
return state
def __setstate__( self : Union[str, Any] , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase : str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : List[Any] = {}
lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __a ( self : str , _A : str ) -> int:
"""simple docstring"""
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def __a ( self : str , _A : Optional[Any] ) -> List[str]:
"""simple docstring"""
return self.sp_model.piece_to_id(UpperCamelCase__ )
def __a ( self : int , _A : List[str] ) -> List[str]:
"""simple docstring"""
lowercase : Optional[Any] = self.sp_model.IdToPiece(UpperCamelCase__ )
return token
def __a ( self : List[Any] , _A : Optional[int] ) -> Dict:
"""simple docstring"""
lowercase : int = []
lowercase : Any = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCamelCase__ ) + token
lowercase : Optional[Any] = []
else:
current_sub_tokens.append(UpperCamelCase__ )
out_string += self.sp_model.decode(UpperCamelCase__ )
return out_string.strip()
def __a ( self : Any , _A : Tuple , _A : Union[str, Any]=None ) -> Dict:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def __a ( self : List[str] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> Union[str, Any]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
lowercase : str = [1]
if token_ids_a is None:
return ([0] * len(UpperCamelCase__ )) + suffix_ones
return ([0] * len(UpperCamelCase__ )) + ([0] * len(UpperCamelCase__ )) + suffix_ones
def __a ( self : Any , _A : str , _A : Optional[str] = None ) -> Tuple:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : List[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , '''wb''' ) as fi:
lowercase : List[str] = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
        return (out_vocab_file,)
| 711 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 596 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = DanceDiffusionPipeline
_lowerCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
_lowerCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_lowerCAmelCase = False
_lowerCAmelCase = False
def a ( self ):
torch.manual_seed(0 )
_UpperCamelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=A_ , use_timestep_embedding=A_ , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
_UpperCamelCase = IPNDMScheduler()
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
}
return components
def a ( self , A_ , A_=0 ):
if str(A_ ).startswith("mps" ):
_UpperCamelCase = torch.manual_seed(A_ )
else:
_UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
_UpperCamelCase = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def a ( self ):
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = DanceDiffusionPipeline(**A_ )
_UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCamelCase = self.get_dummy_inputs(A_ )
_UpperCamelCase = pipe(**A_ )
_UpperCamelCase = output.audios
_UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_UpperCamelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def a ( self ):
return super().test_save_load_local()
@skip_mps
def a ( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def a ( self ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self ):
return super().test_attention_slicing_forward_pass()
def a ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
_UpperCamelCase = torch_device
_UpperCamelCase = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
_UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(generator=A_ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
_UpperCamelCase = output.audios
_UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCamelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self ):
_UpperCamelCase = torch_device
_UpperCamelCase = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(generator=A_ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
_UpperCamelCase = output.audios
_UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCamelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
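# --- Illustrative sketch (not part of the original sample) ---
# The tests above produce raw waveforms of shape (batch, channels, sample_size).
# A sketch of writing such output to disk, assuming the third-party `soundfile`
# package and the 16 kHz sample rate used by the dummy UNet above:
import numpy as np
import soundfile as sf

def save_audio(audios, sample_rate=16_000, path="out.wav"):
    # soundfile expects (frames, channels); the pipeline returns (channels, frames)
    sf.write(path, np.asarray(audios)[0].T, samplerate=sample_rate)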
| 138 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file : str , eval_file : str , test_file : str , tokenizer : PreTrainedTokenizer , label_column_id : int , max_seq_length : Optional[int] = None , ):
"""simple docstring"""
_UpperCamelCase = {}
if train_file is not None:
_UpperCamelCase = [train_file]
if eval_file is not None:
_UpperCamelCase = [eval_file]
if test_file is not None:
_UpperCamelCase = [test_file]
_UpperCamelCase = datasets.load_dataset("csv" , data_files=_UpperCamelCase )
_UpperCamelCase = list(ds[list(files.keys() )[0]].features.keys() )
_UpperCamelCase = features_name.pop(_UpperCamelCase )
_UpperCamelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
_UpperCamelCase = {label: i for i, label in enumerate(_UpperCamelCase )}
_UpperCamelCase = tokenizer.model_input_names
_UpperCamelCase = {}
if len(_UpperCamelCase ) == 1:
for k in files.keys():
_UpperCamelCase = ds[k].map(
lambda _UpperCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=_UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" ) , batched=_UpperCamelCase , )
elif len(_UpperCamelCase ) == 2:
for k in files.keys():
_UpperCamelCase = ds[k].map(
lambda _UpperCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=_UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , ) , batched=_UpperCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_UpperCamelCase = {k: v for k, v in ex.items() if k in input_names}
_UpperCamelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_UpperCamelCase = {k: v for k, v in ex.items() if k in input_names}
_UpperCamelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_UpperCamelCase = {k: v for k, v in ex.items() if k in input_names}
_UpperCamelCase = labelaid[ex[label_name]]
yield (d, label)
_UpperCamelCase = (
tf.data.Dataset.from_generator(
_UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_UpperCamelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_UpperCamelCase = (
tf.data.Dataset.from_generator(
_UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_UpperCamelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_UpperCamelCase = (
tf.data.Dataset.from_generator(
_UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_UpperCamelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
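# --- Illustrative note (not part of the original sample) ---
# `tf.data.Dataset.from_generator(gen, output_types, output_shapes)` above uses
# the legacy signature; newer TF code would pass a single
# `output_signature=({k: tf.TensorSpec([None], tf.int32) for k in input_names},
# tf.TensorSpec([], tf.int32))` instead. `assert_cardinality` is applied because
# generator-backed datasets have unknown length, and the trainer needs the true
# size to schedule epochs.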
snake_case_ : Tuple = logging.getLogger(__name__)
@dataclass
class A_ :
'''simple docstring'''
_lowerCAmelCase = field(metadata={"""help""": """Which column contains the label"""} )
_lowerCAmelCase = field(default=lowerCAmelCase_ , metadata={"""help""": """The path of the training file"""} )
_lowerCAmelCase = field(default=lowerCAmelCase_ , metadata={"""help""": """The path of the development file"""} )
_lowerCAmelCase = field(default=lowerCAmelCase_ , metadata={"""help""": """The path of the test file"""} )
_lowerCAmelCase = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
_lowerCAmelCase = field(
default=lowerCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class A_ :
'''simple docstring'''
_lowerCAmelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_lowerCAmelCase = field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_lowerCAmelCase = field(
default=lowerCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_lowerCAmelCase = field(default=lowerCAmelCase_ , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowerCAmelCase = field(
default=lowerCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def main():
"""simple docstring"""
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
f"16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=_UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_UpperCamelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_UpperCamelCase = TFTrainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=_UpperCamelCase , eval_dataset=_UpperCamelCase , compute_metrics=_UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(_UpperCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
results.update(_UpperCamelCase )
return results
if __name__ == "__main__":
main()
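# --- Illustrative usage (not part of the original sample) ---
# HfArgumentParser turns the dataclass fields consumed in main() into CLI
# flags; a hypothetical invocation over CSVs whose first column is the label
# (script name and file paths are placeholders):
#
#   python run_tf_text_classification.py \
#     --model_name_or_path bert-base-cased \
#     --train_file train.csv --dev_file dev.csv --test_file test.csv \
#     --label_column_id 0 --max_seq_length 128 \
#     --output_dir ./output --do_train --do_eval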
| 138 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ : Optional[int] = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : str = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 521 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCAmelCase_ : Tuple = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
lowerCAmelCase_ : Union[str, Any] = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
lowerCAmelCase_ : Tuple = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE (datasets.Metric ):
"""simple docstring"""
def UpperCamelCase__ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
def UpperCamelCase__ ( self : Optional[int] , __a : List[Any] , __a : str , __a : int=None , __a : Dict=True , __a : Optional[int]=False ):
if rouge_types is None:
_a = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
_a = rouge_scorer.RougeScorer(rouge_types=__a , use_stemmer=__a )
if use_aggregator:
_a = scoring.BootstrapAggregator()
else:
_a = []
for ref, pred in zip(__a , __a ):
_a = scorer.score(__a , __a )
if use_aggregator:
aggregator.add_scores(__a )
else:
scores.append(__a )
if use_aggregator:
_a = aggregator.aggregate()
else:
_a = {}
for key in scores[0]:
_a = [score[key] for score in scores]
return result
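# --- Illustrative usage (not part of the original sample) ---
# With use_aggregator=False the metric returns per-pair scores instead of
# bootstrap aggregates; a sketch assuming the `datasets` metric API:
#
#   rouge = datasets.load_metric("rouge")
#   scores = rouge.compute(
#       predictions=["hello there"], references=["hello here"],
#       rouge_types=["rouge1"], use_aggregator=False,
#   )
#   # scores["rouge1"] is then a list with one Score(precision, recall, fmeasure)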
| 521 | 1 |
'''simple docstring'''
def binary_recursive(decimal: int) -> str:
    '''simple docstring'''
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    '''simple docstring'''
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
    from doctest import testmod
    testmod()
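    # --- Illustrative checks (not part of the original sample) ---
    assert main("7") == "0b111"
    assert main("-2") == "-0b10"
    assert main(" 0 ") == "0b0"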
| 48 |
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy
    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start, goal) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 9_9999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors
    def retrace_path(self, node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
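# Note on the algorithm above (illustrative): greedy best-first search orders
# the frontier purely by the heuristic (f_cost is just the Manhattan distance),
# so it is fast but not guaranteed optimal. An A* variant would keep the same
# classes and only change the ordering key in Node.__init__, e.g.:
#
#     self.f_cost = self.g_cost + self.calculate_heuristic()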
| 368 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}, )
    max_seq_length: int = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    pad_to_max_length: bool = field(
        default=False, metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        }, )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."})
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."})
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
def main():
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}')
    logger.info(f'Training/evaluation parameters {training_args}')
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f'load a local file for {key}: {data_files[key]}')

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
            f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        sentences = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, sentences, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result
    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f'{index}\t{item}\n')

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
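# A minimal sketch of what the preprocessing above produces for a single
# table/statement pair (illustrative; the checkpoint id is one published
# TAPEX model, not necessarily the one this script is run with):
#
#     import pandas as pd
#     from transformers import TapexTokenizer
#
#     tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
#     table = pd.DataFrame.from_records([["beijing", "2008"]], columns=["city", "year"])
#     encoding = tokenizer(table, "beijing hosted the games in 2008", return_tensors="pt")
#     print(encoding["input_ids"].shape)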
| 709 |

from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix is Hermitian."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
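# A further sanity check (illustrative): for an eigenvector v of a Hermitian
# matrix, the Rayleigh quotient recovers the corresponding eigenvalue.
A = np.array([[2.0, 1.0], [1.0, 2.0]])
w, V = np.linalg.eigh(A)
v = V[:, [0]]  # eigenvector for eigenvalue w[0]
assert np.isclose(rayleigh_quotient(A, v).item(), w[0])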
| 316 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VideoImageProcessor(BaseImageProcessor):
    # Class name approximated: the dump does not preserve the original name; this
    # is a ViViT/VideoMAE-style video image processor.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
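# A usage sketch for the processor above (illustrative): feed a list of frames
# (numpy arrays in H x W x C layout) and get back batched pixel values.
#
#     import numpy as np
#
#     processor = VideoImageProcessor()
#     video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#     inputs = processor(video, return_tensors="np")
#     print(inputs["pixel_values"].shape)  # (1, 8, 3, 224, 224)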
| 372 |
'''simple docstring'''
from manim import *
class CheckpointAnimation(Scene):  # class name approximated; the original is not preserved in this dump
    def construct(self) -> None:
lowercase : List[Any] = Rectangle(height=0.5 , width=0.5 )
lowercase : str = Rectangle(height=0.25 , width=0.25 )
lowercase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase : List[str] = [mem.copy() for i in range(6 )]
lowercase : Any = [mem.copy() for i in range(6 )]
lowercase : List[str] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : List[Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : List[Any] = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
lowercase : Union[str, Any] = Text("CPU" , font_size=2_4 )
lowercase : List[Any] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a_ )
lowercase : List[Any] = [mem.copy() for i in range(4 )]
lowercase : Union[str, Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : Dict = Text("GPU" , font_size=2_4 )
lowercase : Tuple = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
gpu.move_to([-1, -1, 0] )
self.add(a_ )
lowercase : Tuple = [mem.copy() for i in range(6 )]
lowercase : Optional[int] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : Any = Text("Model" , font_size=2_4 )
lowercase : str = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
model.move_to([3, -1.0, 0] )
self.add(a_ )
lowercase : Dict = []
lowercase : Tuple = []
lowercase : List[Any] = []
for i, rect in enumerate(a_ ):
rect.set_stroke(a_ )
lowercase : Tuple = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(a_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=a_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=a_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=a_ , buff=0.0 )
self.add(a_ )
model_cpu_arr.append(a_ )
self.add(*a_ , *a_ , *a_ )
lowercase : Any = [mem.copy() for i in range(6 )]
lowercase : Dict = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : List[str] = Text("Loaded Checkpoint" , font_size=2_4 )
lowercase : Optional[Any] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(a_ )
lowercase : Any = []
lowercase : int = []
for i, rect in enumerate(a_ ):
lowercase : str = fill.copy().set_fill(a_ , opacity=0.7 )
target.move_to(a_ )
ckpt_arr.append(a_ )
lowercase : Optional[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(a_ )
self.add(*a_ , *a_ )
lowercase : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase : str = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a_ , a_ )
lowercase : Any = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(a_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a_ )
lowercase : List[Any] = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
lowercase : Any = [meta_mem.copy() for i in range(6 )]
lowercase : Dict = [meta_mem.copy() for i in range(6 )]
lowercase : Union[str, Any] = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : Dict = VGroup(*a_ ).arrange(a_ , buff=0 )
lowercase : Any = VGroup(a_ , a_ ).arrange(a_ , buff=0 )
lowercase : Optional[Any] = Text("Disk" , font_size=2_4 )
lowercase : List[str] = Group(a_ , a_ ).arrange(a_ , buff=0.5 , aligned_edge=a_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(a_ , run_time=3 ) , Write(a_ , run_time=1 ) , Create(a_ , run_time=1 ) )
lowercase : Optional[Any] = []
for i, rect in enumerate(a_ ):
lowercase : int = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(a_ , run_time=1.5 ) )
self.play(*a_ )
self.play(FadeOut(a_ ) )
lowercase : List[Any] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(a_ , run_time=3 ) )
self.play(
FadeOut(a_ , a_ , *a_ , *a_ ) , )
self.wait()
| 372 | 1 |
"""simple docstring"""
from copy import deepcopy
class BinaryIndexedTree:
    '''A Fenwick tree (binary indexed tree) for prefix sums and point updates.'''

    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        # Build the tree in O(n) from an existing array
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        # Recover the original array in O(n)
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index) -> int:
        return index - (index & (-index))

    def add(self, index, value) -> None:
        # Point update: arr[index] += value, in O(log n)
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value) -> None:
        # Point assignment: arr[index] = value
        self.add(index, value - self.get(index))

    def prefix(self, right) -> int:
        # Sum of arr[0:right], in O(log n)
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right) -> int:
        # Sum of arr[left:right]
        return self.prefix(right) - self.prefix(left)

    def get(self, index) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value) -> int:
        # Largest index i such that the prefix sum up to i does not exceed
        # value, assuming non-negative array values; -1 if value < arr[0].
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
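# A quick usage sketch of the tree above (illustrative):
ft = BinaryIndexedTree(arr=[1, 2, 3, 4, 5])
assert ft.prefix(3) == 1 + 2 + 3
ft.add(1, 10)  # array becomes [1, 12, 3, 4, 5]
assert ft.query(1, 4) == 12 + 3 + 4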
| 573 |
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):  # test name approximated from the skip reason
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device)  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 573 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    r"""
    Pipeline for unconditional image generation with the Karras et al. (2022)
    stochastic sampler, tailored to variance-expanding models.
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["derivative"], )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
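# A usage sketch for the pipeline above (illustrative; the checkpoint id is an
# assumption — any Karras-VE checkpoint with a matching scheduler works):
#
#     pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]
#     image.save("sample.png")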
| 443 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.")
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)  # dtype assumed; the dump only preserves "floataa"
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)

| 498 | 0 |
"""simple docstring"""
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?", ["This machine", "AWS (Amazon SageMaker)"], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file", default=None, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
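# A sketch of reading the saved configuration back programmatically
# (illustrative; signature assumed from the import at the top of this module):
#
#     from accelerate.commands.config.config_args import load_config_from_file
#
#     cfg = load_config_from_file(None)  # None falls back to the default config file
#     print(cfg.compute_environment)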
| 718 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
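# The recommended replacement, per the deprecation warning above (a sketch;
# the checkpoint id is one published FLAVA model):
#
#     from transformers import FlavaImageProcessor
#
#     image_processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")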
| 363 | 0 |
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 249 |
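# An illustrative run of the maze solver above (0 = free cell, 1 = obstacle):
#
#     tiny_maze = [
#         [0, 1, 0],
#         [0, 0, 0],
#         [1, 0, 0],
#     ]
#     solve_maze(tiny_maze)  # prints the 0/1 solution grid and returns True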
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
'hidden_size': 'emb_dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
'n_words': 'vocab_size', # For backward compatibility
}
    def __init__(self, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048**-0.5, layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type="first", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
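

if __name__ == "__main__":
    # Hedged demo (illustrative, not part of the original module): `attribute_map`
    # routes the generic `hidden_size` attribute to XLM's `emb_dim` (2048 by default).
    print(XLMConfig().hidden_size)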
'''simple docstring'''
def merge_sort(collection: list) -> list:
    """Pure implementation of merge sort in Python."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into one sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase = input('''Enter numbers separated by a comma:\n''').strip()
__lowercase = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert the given 32-char bit string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Convert the given non-negative integer to hex with little-endian byte order."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message into a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-char blocks and yield each block as 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Perform bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Add two 32-bit integers, wrapping around on overflow."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by the given amount."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-character hex MD5 digest of the given message bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
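
    # Hedged demo (message is illustrative): prints the 32-character hex digest.
    print(md5_me(b"The quick brown fox jumps over the lazy dog").decode())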
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file that have a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of the test classes."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to model tester classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in `test_file`."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and easy to read."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
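

if __name__ == "__main__":
    # Hedged demo (the test-file path is illustrative and assumes the process runs
    # from the repository root): maps every model class found in the given test
    # module to the test classes that exercise it.
    print(to_json(get_model_to_test_mapping("tests/models/bert/test_modeling_bert.py")))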
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ =logging.get_logger(__name__)
lowercase__ ={
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(self, num_channels=3, image_size=600, width_coefficient=2.0, depth_coefficient=3.1, depth_divisor=8, kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192], out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1], num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio=0.25, hidden_act="swish", hidden_dim=2560, pooling_type="mean", initializer_range=0.02, batch_norm_eps=0.001, batch_norm_momentum=0.99, drop_connect_rate=0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
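

if __name__ == "__main__":
    # Hedged demo (illustrative, not part of the original module): compose an
    # ALIGN config from default text and vision sub-configs.
    config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())
    print(config.projection_dim)  # 640 by default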
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap array[index1] and array[index2] if they are out of order for the given direction."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence into ascending (direction=1) or descending (direction=0) order."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Build a bitonic sequence by sorting the two halves in opposite orders, then merge."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
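

if __name__ == "__main__":
    # Hedged demo (illustrative, not part of the original module): resolve an
    # activation name to an nn.Module instance.
    print(get_activation("gelu"))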
'''simple docstring'''
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a length value between metric units."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
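
    # Hedged demo (values are illustrative): 4 kilometres expressed in metres.
    print(length_conversion(4, "kilometer", "meter"))  # 4000.0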
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase: Tuple =logging.get_logger(__name__)
_UpperCamelCase: Any ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(self, vocab_size=267735, cutoffs=[20000, 40000, 200000], d_model=1024, d_embed=1024, n_head=16, d_head=64, d_inner=4096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1600, clamp_len=1000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
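

if __name__ == "__main__":
    # Hedged demo (illustrative, not part of the original module): Transformer-XL
    # has no fixed context window, so the config reports -1 here.
    print(TransfoXLConfig().max_position_embeddings)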
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_UpperCamelCase: int =logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding='utf_8') as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets of (story, 1st continuation, 2nd continuation, label) tuples into
    Transformer inputs of shape (n_batch, 2, input_len), one row per continuation."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='openai-gpt', help='pretrained model name')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument(
        '--output_dir',
        default=None,
        type=str,
        required=True,
        help='The output directory where the model predictions and checkpoints will be written.',
    )
    parser.add_argument('--train_dataset', type=str, default='')
    parser.add_argument('--eval_dataset', type=str, default='')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_train_epochs', type=int, default=3)
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', type=int, default=1)
    parser.add_argument(
        '--max_steps',
        default=-1,
        type=int,
        help='If > 0: set total number of training steps to perform. Override num_train_epochs.',
    )
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help='Number of updates steps to accumulate before performing a backward/update pass.',
    )
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--lm_coef', type=float, default=0.9)
    parser.add_argument('--n_valid', type=int, default=374)
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_lowerCAmelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
_lowerCAmelCase = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_lowerCAmelCase = ['_start_', '_delimiter_', '_classify_']
_lowerCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(__SCREAMING_SNAKE_CASE ) )
model.to(__SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(__SCREAMING_SNAKE_CASE : str ):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) )
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return obj
return [tokenize_and_encode(__SCREAMING_SNAKE_CASE ) for o in obj]
logger.info('Encoding dataset...' )
_lowerCAmelCase = load_rocstories_dataset(args.train_dataset )
_lowerCAmelCase = load_rocstories_dataset(args.eval_dataset )
_lowerCAmelCase = (train_dataset, eval_dataset)
_lowerCAmelCase = tokenize_and_encode(__SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
_lowerCAmelCase = model.config.n_positions // 2 - 2
_lowerCAmelCase = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
_lowerCAmelCase = min(__SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_lowerCAmelCase = pre_process_datasets(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE )
_lowerCAmelCase , _lowerCAmelCase = tensor_datasets[0], tensor_datasets[1]
_lowerCAmelCase = TensorDataset(*__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = RandomSampler(__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
_lowerCAmelCase = TensorDataset(*__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = SequentialSampler(__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_lowerCAmelCase = args.max_steps
_lowerCAmelCase = args.max_steps // (len(__SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
_lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
_lowerCAmelCase = list(model.named_parameters() )
_lowerCAmelCase = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
_lowerCAmelCase = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
_lowerCAmelCase = AdamW(__SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
_lowerCAmelCase = get_linear_schedule_with_warmup(
__SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=__SCREAMING_SNAKE_CASE )
if args.do_train:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
_lowerCAmelCase = 0
_lowerCAmelCase = 0
_lowerCAmelCase = tqdm(__SCREAMING_SNAKE_CASE , desc='Training' )
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = tuple(t.to(__SCREAMING_SNAKE_CASE ) for t in batch )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = batch
_lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , mc_token_ids=__SCREAMING_SNAKE_CASE , lm_labels=__SCREAMING_SNAKE_CASE , mc_labels=__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_lowerCAmelCase = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
_lowerCAmelCase = 'Training loss: {:.2e} lr: {:.2e}'.format(__SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_lowerCAmelCase = model.module if hasattr(__SCREAMING_SNAKE_CASE , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_lowerCAmelCase = os.path.join(args.output_dir , __SCREAMING_SNAKE_CASE )
_lowerCAmelCase = os.path.join(args.output_dir , __SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , __SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(__SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_lowerCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_lowerCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
_lowerCAmelCase , _lowerCAmelCase = 0, 0
_lowerCAmelCase , _lowerCAmelCase = 0, 0
for batch in tqdm(__SCREAMING_SNAKE_CASE , desc='Evaluating' ):
_lowerCAmelCase = tuple(t.to(__SCREAMING_SNAKE_CASE ) for t in batch )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = batch
with torch.no_grad():
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = model(
__SCREAMING_SNAKE_CASE , mc_token_ids=__SCREAMING_SNAKE_CASE , lm_labels=__SCREAMING_SNAKE_CASE , mc_labels=__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = mc_logits.detach().cpu().numpy()
_lowerCAmelCase = mc_labels.to('cpu' ).numpy()
_lowerCAmelCase = accuracy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_lowerCAmelCase = eval_loss / nb_eval_steps
_lowerCAmelCase = eval_accuracy / nb_eval_examples
_lowerCAmelCase = tr_loss / nb_tr_steps if args.do_train else None
_lowerCAmelCase = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
_lowerCAmelCase = os.path.join(args.output_dir , 'eval_results.txt' )
with open(__SCREAMING_SNAKE_CASE , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , __SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
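    # Taken together, the assertions above walk the whole server lifecycle:
    # bind/listen/accept, one read-send round trip, then shutdown and close.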
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int('1' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
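
    # Hedged demo (values are illustrative): 25 is 0b11001 and 32 is 0b100000,
    # so their bitwise OR is 0b111001.
    print(binary_or(25, 32))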
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id,
        )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()

        config.is_decoder = True

        return config, input_ids, input_mask, token_labels
def _snake_case ( self: List[Any] , a: Optional[int] , a: Dict , a: List[str] ):
__lowerCamelCase : str = GPTNeoXModel(config=a )
model.to(a )
model.eval()
__lowerCamelCase : List[str] = model(a , attention_mask=a )
__lowerCamelCase : str = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self: List[Any] , a: Any , a: List[str] , a: Tuple ):
__lowerCamelCase : Dict = True
__lowerCamelCase : Dict = GPTNeoXModel(a )
model.to(a )
model.eval()
__lowerCamelCase : Union[str, Any] = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self: str , a: List[Any] , a: str , a: str , a: Optional[Any] ):
__lowerCamelCase : Optional[Any] = GPTNeoXForCausalLM(config=a )
model.to(a )
model.eval()
__lowerCamelCase : List[str] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self: Union[str, Any] , a: Optional[int] , a: Any , a: Dict , a: str ):
__lowerCamelCase : List[Any] = self.num_labels
__lowerCamelCase : Tuple = GPTNeoXForQuestionAnswering(a )
model.to(a )
model.eval()
__lowerCamelCase : Tuple = model(a , attention_mask=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self: Optional[Any] , a: Dict , a: Any , a: int , a: List[str] ):
__lowerCamelCase : int = self.num_labels
__lowerCamelCase : str = GPTNeoXForSequenceClassification(a )
model.to(a )
model.eval()
__lowerCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : List[str] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self: Dict , a: Dict , a: List[Any] , a: List[Any] , a: Tuple ):
__lowerCamelCase : Union[str, Any] = self.num_labels
__lowerCamelCase : Any = GPTNeoXForTokenClassification(a )
model.to(a )
model.eval()
__lowerCamelCase : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self: Tuple , a: Optional[Any] , a: Tuple , a: int ):
__lowerCamelCase : int = True
__lowerCamelCase : List[str] = GPTNeoXForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
__lowerCamelCase : str = model(a , attention_mask=a , use_cache=a )
__lowerCamelCase : Tuple = outputs.past_key_values
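        # `past_key_values` caches each layer's key/value tensors, so the second
        # forward pass below can attend to earlier tokens without recomputing them.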
        # create hypothetical multiple next tokens and extend next_input_ids
__lowerCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowerCamelCase : int = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
__lowerCamelCase : Tuple = model(a , attention_mask=a , output_hidden_states=a )
__lowerCamelCase : List[Any] = output_from_no_past['hidden_states'][0]
__lowerCamelCase : List[str] = model(
a , attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
__lowerCamelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCamelCase : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
    # Test names must start with `test_` for unittest to run them; the names
    # below are restored best-effort from the upstream GPT-NeoX test suite, and
    # `GPTNeoXConfig` is assumed to be imported alongside the model classes.
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason='Feed forward chunking is not implemented')
    def test_feed_forward_chunking(self):
        pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        # Names restored best-effort from the upstream rope-scaling test; the
        # parameterized argument is the scaling type ('linear' or 'dynamic').
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
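    # The knob exercised above is the config's `rope_scaling` dict, e.g.
    # {"type": "linear", "factor": 10.0} stretches positions uniformly, while
    # {"type": "dynamic", "factor": 10.0} only rescales once the input exceeds
    # the trained maximum length, which is why the short-input outputs match
    # for "dynamic" but not for "linear".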
@require_torch
class GPTNeoXLanguageGenerationTest( unittest.TestCase ):  # name restored; the placeholder collided with the class above
'''simple docstring'''
@slow
    def test_lm_generate_gptneox(self):
        # Variable names restored; `do_sample=False` is implied by the fixed
        # expected string (greedy decoding).
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped')
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped')
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)
            inputs = tokenizer('My favorite food is', return_tensors='pt').to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]
            self.assertEqual(output_str, expected_output)
| 230 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase_ = False
class A_ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests( unittest.TestCase ):  # illustrative name; the placeholder collided with the class above
'''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        # Variable names restored; `torch.floataa` was the style transform's
        # digit-mangled spelling of a two-digit float dtype, and float16 is the
        # usual choice for these GPU integration tests.
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        # Variable names restored and digit-mangled numerals normalized.
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 230 | 1 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
​# Function and parameter names restored; the call sites below already use
# `intersect_and_union`, `total_intersect_and_union`, and `mean_iou`, so the
# placeholder def names made the module unimportable.
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
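​# Worked example for intersect_and_union above (hypothetical 2x2 maps with
# num_labels=2 and ignore_index=255): pred_label = [[0, 1], [1, 1]] versus
# label = [[0, 0], [1, 1]] gives class 1 an intersection of 2 and a union of
# 3 + 2 - 2 = 3, i.e. an IoU of 2/3.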
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    # `np.floataa` was the digit-mangled spelling of np.float64.
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels)
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels)
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
'''simple docstring'''
    def _info(self) -> Any:  # datasets.Metric hook; name restored
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
}) , reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
] , )
    # Method and parameter names restored to the datasets.Metric API; the
    # keyword names are documented in _KWARGS_DESCRIPTION above.
    def _compute(self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
        """simple docstring"""
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index, nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels, )
        return iou_result
| 293 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : Any = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig( PretrainedConfig ):
    '''simple docstring'''

    # Class and attribute names restored to the PretrainedConfig conventions;
    # the base class is the PretrainedConfig imported above.
    model_type = 'xglm'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'num_layers',
    }
    # Parameter names restored from the attribute assignments in the body;
    # they match the upstream XGLM configuration.
    def __init__( self , vocab_size=256008 , max_position_embeddings=2048 , d_model=1024 , ffn_dim=4096 , num_layers=24 , attention_heads=16 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.02 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 293 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def assertListAlmostEqual(self, list1, list2, tol):
        # Name restored from the `assertListAlmostEqual(..., tol=...)` call sites below.
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_gradient_accumulator(self):
        accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(lowercase__ ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
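    # Contract exercised above: each `accumulator([grads])` call adds into the
    # running sums and bumps `.step`; `.reset()` zeroes both. A rough training
    # loop sketch (hypothetical helpers, not part of this suite):
    #   for micro_batch in batches:
    #       accumulator(compute_grads(micro_batch))
    #   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
    #   accumulator.reset()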
    def test_gradient_accumulator_distribution_strategy(self):
        # Names restored; note that create_optimizer returns an
        # (optimizer, lr_schedule) pair.
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()])
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 710 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, 'src', 'diffusers')
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def test_find_backend(self):
        # Local names restored; they had been collapsed into one placeholder.
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
    def test_create_dummy_object(self):
        # Local names restored; the indentation inside the expected strings was
        # collapsed by extraction and is restored to the four spaces that
        # create_dummy_object emits.
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 281 | 0 |
​# Function and parameter names restored from the references inside the body
# (the recursive calls already used `depth_first_search`).
def depth_first_search(grid, row, col, visit):
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
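    # Minimal usage sketch of the reconstructed function above: count the DFS
    # paths from the top-left to the bottom-right of an open 2x2 grid
    # (0 = free, 1 = blocked). The two L-shaped routes give a count of 2.
    print(depth_first_search([[0, 0], [0, 0]], 0, 0, set()))  # -> 2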
| 23 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A_ = StableDiffusionSAGPipeline
A_ = TEXT_TO_IMAGE_PARAMS
A_ = TEXT_TO_IMAGE_BATCH_PARAMS
A_ = TEXT_TO_IMAGE_IMAGE_PARAMS
A_ = TEXT_TO_IMAGE_IMAGE_PARAMS
A_ = False
def _UpperCAmelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
UpperCamelCase_ = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCamelCase_ = CLIPTextModel(_UpperCAmelCase )
UpperCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ) -> List[Any]:
if str(_UpperCAmelCase ).startswith('mps' ):
UpperCamelCase_ = torch.manual_seed(_UpperCAmelCase )
else:
UpperCamelCase_ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
UpperCamelCase_ = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> str:
UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = '.'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = sag_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
        UpperCamelCase_ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def _UpperCAmelCase ( self ) -> List[Any]:
UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = '.'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = sag_pipe(
[prompt] , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
UpperCamelCase_ = output.images
UpperCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
        UpperCamelCase_ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def _UpperCAmelCase ( self ) -> Dict:
UpperCamelCase_ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCamelCase_ = sag_pipe.to(_UpperCAmelCase )
sag_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCamelCase_ = '.'
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = sag_pipe(
[prompt] , width=768 , height=512 , generator=_UpperCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
UpperCamelCase_ = output.images
assert image.shape == (1, 512, 768, 3)
| 23 | 1 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)  # name restored; `logger` is used throughout below
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCamelCase : List[str] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def A__ ( __A : Optional[int] , __A : Tuple , __A : Union[str, Any] , __A : List[str] , __A : Union[str, Any] ) ->str:
for attribute in key.split('''.''' ):
__A =getattr(__A , __A )
if weight_type is not None:
__A =getattr(__A , __A ).shape
else:
__A =hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__A =value
elif weight_type == "weight_g":
__A =value
elif weight_type == "weight_v":
__A =value
elif weight_type == "bias":
__A =value
else:
__A =value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def A__ ( __A : int , __A : str ) ->List[str]:
__A =[]
__A =fairseq_model.state_dict()
__A =hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
__A =None
for name, value in fairseq_dict.items():
__A =False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == '''group''' , )
__A =True
elif name.split('''.''' )[0] == "proj":
__A =fairseq_model.proj
__A =True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__A =True
if "*" in mapped_key:
__A =name.split(__A )[0].split('''.''' )[-2]
__A =mapped_key.replace('''*''' , __A )
if "weight_g" in name:
__A ='''weight_g'''
elif "weight_v" in name:
__A ='''weight_v'''
elif "bias" in name:
__A ='''bias'''
elif "weight" in name:
__A ='''weight'''
else:
__A =None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def A__ ( __A : str , __A : List[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : str ) ->Optional[Any]:
__A =full_name.split('''conv_layers.''' )[-1]
__A =name.split('''.''' )
__A =int(items[0] )
__A =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__A =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__A =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__A =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__A =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__A )
def make_linear_from_emb(emb):
    # Name and locals restored best-effort from the upstream conversion
    # helpers; builds an output projection sharing the embedding weights.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
​# Name restored from the call site in the conversion entry point below.
def create_vocab_dict(dict_path):
    with open(dict_path, '''r''', encoding='''utf-8''') as f:
        lines = f.readlines()
    words = [line.split(''' ''')[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        '''<s>''': 0,
        '''<pad>''': 1,
        '''</s>''': 2,
        '''<unk>''': 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
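​# Layout produced above (hypothetical dict-file contents): the four special
# tokens take ids 0-3 and the fairseq words follow from id 4, e.g.
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "the": 4, ...}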
@torch.no_grad()
def A__ ( __A : List[Any] , __A : Optional[Any] , __A : Tuple , __A : int , __A : str , __A : str , __A : Dict , ) ->Tuple:
__A =WavaVecaConfig.from_pretrained(__A )
__A =SpeechaTextaConfig.from_pretrained(
__A , vocab_size=__A , decoder_layers=__A , do_stable_layer_norm=__A )
__A =WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
__A , __A , __A =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__A =model[0].eval()
# set weights for wav2vec2 encoder
__A =WavaVecaModel(__A )
__A =recursively_load_weights_wavaveca(model.encoder , __A )
__A =SpeechaTextaForCausalLM(__A )
__A , __A =hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__A )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
__A =nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
__A =SpeechEncoderDecoderModel(encoder=__A , decoder=__A )
__A =False
# add projection layer
__A =nn.Parameter(projection_layer.weight )
__A =nn.Parameter(projection_layer.bias )
__A =create_vocab_dict(__A )
with open(os.path.join(__A , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(__A , __A )
__A =SpeechaTextaTokenizer(os.path.join(__A , '''vocab.json''' ) )
tokenizer.save_pretrained(__A )
__A =hf_wavavec.config.to_dict()
__A =tokenizer.pad_token_id
__A =tokenizer.bos_token_id
__A =tokenizer.eos_token_id
__A ='''speech_to_text_2'''
__A ='''wav2vec2'''
__A =SpeechEncoderDecoderConfig.from_dict(__A )
hf_wavavec.save_pretrained(__A )
feature_extractor.save_pretrained(__A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
    parser.add_argument('''--vocab_size''', default=10224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 516 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('''T''')  # name restored; `T` is referenced by Generic[T] below
​# The class, method, and parameter names below are restored so that the
# __main__ demo (which instantiates `SegmentTree` and calls update/query)
# actually runs; the build loops also write into self.st again instead of a
# throwaway local.
class SegmentTree(Generic[T]):
    '''simple docstring'''

    def __init__(self, arr: list, fnc: Callable[[T, T], T]):
        # Store the combining function and build the tree bottom-up.
        any_type: Any = None
        self.N = len(arr)
        self.st = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self):
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T):
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int):  # noqa: E741
        l, r = l + self.N, r + self.N
        res = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
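​# Quick illustrative query (the __main__ block below checks all ranges
# exhaustively): a range-minimum tree over [3, 1, 4, 1, 5] answers
# SegmentTree([3, 1, 4, 1, 5], min).query(1, 3) with min(1, 4, 1) == 1.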
if __name__ == "__main__":
from functools import reduce
    # Names restored from their uses in test_all_segments below.
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        # Compare every range query against a brute-force reduce over the slice.
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 516 | 1 |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
    def _info(self) -> Dict:  # datasets.Metric hook; name restored
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
    def _compute(self, predictions, references, return_pvalue=False) -> Any:
        # Method and parameter names restored to the datasets.Metric API; the
        # keyword names are documented in _KWARGS_DESCRIPTION above.
        results = spearmanr(predictions, references)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 147 |
from maths.prime_factors import prime_factors
​# The function name below was stripped by the style transform; it is restored
# here as the Liouville lambda, which is what the logic computes.
def liouville_lambda(number: int) -> int:
    """Return -1 if `number` has an odd count of prime factors (with
    multiplicity) and +1 if the count is even."""
    if not isinstance(number, int):
        msg = F'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 1:
        raise ValueError('''Input must be a positive integer''')
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
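    # Illustrative values for the function above (assuming `prime_factors`
    # returns factors with multiplicity): 12 = 2 * 2 * 3 has an odd count, so
    # liouville_lambda(12) == -1, while 10 = 2 * 5 gives +1.
    print(liouville_lambda(12), liouville_lambda(10))  # -> -1 1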
| 639 | 0 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
PREFIX = '''https://openaipublic.azureedge.net/jukebox/models/'''  # name restored; used in the download loop below
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def _SCREAMING_SNAKE_CASE ( A : Dict ) -> Optional[Any]:
"""simple docstring"""
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
__snake_case : Union[str, Any] = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
__snake_case : Optional[int] = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
__snake_case : Optional[Any] = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
__snake_case : Dict = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
__snake_case : Optional[int] = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
__snake_case : int = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__snake_case : Optional[Any] = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
__snake_case : int = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def _SCREAMING_SNAKE_CASE ( A : Optional[int] , A : Tuple , A : int , A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : str = {}
import re
__snake_case : Any = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
__snake_case : Union[str, Any] = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__snake_case : Dict = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
__snake_case : Union[str, Any] = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
__snake_case : List[Any] = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__snake_case : Any = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
__snake_case : Tuple = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
__snake_case : List[str] = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__snake_case : str = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(A ):
__snake_case : Optional[Any] = re_encoder_block_conv_in.match(A )
__snake_case : Optional[int] = regex_match.groups()
__snake_case : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
__snake_case : Dict = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
__snake_case : Dict = re_encoder_block_conv_in.sub(A , A )
elif re_encoder_block_resnet.fullmatch(A ):
__snake_case : str = re_encoder_block_resnet.match(A )
__snake_case : Tuple = regex_match.groups()
__snake_case : List[str] = int(groups[2] ) * 2 + int(groups[3] )
__snake_case : Optional[Any] = {'1': 1, '3': 2}[groups[-2]]
__snake_case : str = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
__snake_case : List[Any] = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__snake_case : Tuple = prefix + resnet_block
__snake_case : Optional[Any] = re_encoder_block_resnet.sub(A , A )
elif re_encoder_block_proj_out.fullmatch(A ):
__snake_case : Tuple = re_encoder_block_proj_out.match(A )
__snake_case : Optional[Any] = regex_match.groups()
__snake_case : Optional[int] = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
__snake_case : Union[str, Any] = re_encoder_block_proj_out.sub(A , A )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(A ):
__snake_case : Dict = re_decoder_block_conv_out.match(A )
__snake_case : Union[str, Any] = regex_match.groups()
__snake_case : Tuple = int(groups[2] ) * 2 + int(groups[3] ) - 2
__snake_case : List[str] = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
__snake_case : str = re_decoder_block_conv_out.sub(A , A )
elif re_decoder_block_resnet.fullmatch(A ):
__snake_case : Any = re_decoder_block_resnet.match(A )
__snake_case : int = regex_match.groups()
__snake_case : Dict = int(groups[2] ) * 2 + int(groups[3] ) - 2
__snake_case : int = {'1': 1, '3': 2}[groups[-2]]
__snake_case : Optional[int] = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
__snake_case : Dict = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__snake_case : Union[str, Any] = prefix + resnet_block
__snake_case : Any = re_decoder_block_resnet.sub(A , A )
elif re_decoder_block_proj_in.fullmatch(A ):
__snake_case : int = re_decoder_block_proj_in.match(A )
__snake_case : Optional[Any] = regex_match.groups()
__snake_case : Optional[int] = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
__snake_case : Optional[int] = re_decoder_block_proj_in.sub(A , A )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(A ):
__snake_case : str = re_prior_cond_conv_out.match(A )
__snake_case : Optional[int] = regex_match.groups()
__snake_case : List[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
__snake_case : Tuple = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
__snake_case : Tuple = re_prior_cond_conv_out.sub(A , A )
elif re_prior_cond_resnet.fullmatch(A ):
__snake_case : Dict = re_prior_cond_resnet.match(A )
__snake_case : Optional[int] = regex_match.groups()
__snake_case : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
__snake_case : Any = {'1': 1, '3': 2}[groups[-2]]
__snake_case : Any = F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
__snake_case : List[str] = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__snake_case : int = prefix + resnet_block
__snake_case : List[str] = re_prior_cond_resnet.sub(A , A )
elif re_prior_cond_proj_in.fullmatch(A ):
__snake_case : Optional[Any] = re_prior_cond_proj_in.match(A )
__snake_case : str = regex_match.groups()
__snake_case : Optional[Any] = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
__snake_case : Tuple = re_prior_cond_proj_in.sub(A , A )
# keep original key
else:
__snake_case : int = original_key
__snake_case : Optional[int] = replace_key(A )
if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(F"""failed converting {original_key} to {key}, does not match""" )
# handle missmatched shape
elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
__snake_case : str = model_state_dict[F"""{key_prefix}.{key}"""]
print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
__snake_case : List[str] = original_key
__snake_case : List[Any] = original_key
__snake_case : Union[str, Any] = value
return new_dict
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( A : str=None , A : int=None ) -> str:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
__snake_case : Union[str, Any] = requests.get(F"""{PREFIX}{file}""" , allow_redirects=A )
os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=A )
open(F"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" , 'wb' ).write(r.content )
__snake_case : Union[str, Any] = MODEL_MAPPING[model_name.split('/' )[-1]]
__snake_case : Any = JukeboxConfig.from_pretrained(A )
__snake_case : Optional[Any] = JukeboxModel(A )
__snake_case : Optional[int] = []
__snake_case : Union[str, Any] = {}
for i, dict_name in enumerate(A ):
__snake_case : List[Any] = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )['model']
__snake_case : List[Any] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
__snake_case : Any = old_dic[k]
elif k.endswith('.w' ):
__snake_case : List[Any] = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__snake_case : str = old_dic[k]
else:
__snake_case : List[Any] = old_dic[k]
        key_prefix = 'vqvae' if i == 0 else F"""priors.{3 - i}"""
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(F"""{pytorch_dump_folder_path}/mapping.json""" , 'w' ) as txtfile:
        json.dump(mapping , txtfile )
    print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
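# Example invocation (hypothetical script/file names, for illustration only):
# the model weights are downloaded into the dump folder on first run and a
# mapping.json of the renamed keys is written next to the converted checkpoint.
#
#     python convert_jukebox.py \
#         --model_name jukebox-5b-lyrics \
#         --pytorch_dump_folder_path jukebox-5b-lyrics-converted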
| 705 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def _snake_case ( UpperCamelCase : str ):
re.sub("""<n>""" , """""" , UpperCamelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase ) )
| 160 |
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
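# Example invocation (hypothetical file paths; only --base_model is optional):
#
#     python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./funnel/model.ckpt \
#         --config_file ./funnel/config.json \
#         --pytorch_dump_path ./funnel-pt/pytorch_model.bin \
#         --base_model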
| 160 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester :
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_2_8 , type_vocab_size=3_2 , type_sequence_label_size=1_6 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def lowercase ( self : Optional[int] ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : Any ):
return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def lowercase ( self : Dict ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase ( self : Union[str, Any] , snake_case_ : str , snake_case_ : List[str] , snake_case_ : str , snake_case_ : Any , snake_case_ : Any , snake_case_ : Dict , snake_case_ : List[Any] ):
_UpperCAmelCase = NezhaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
_UpperCAmelCase = model(snake_case_ , token_type_ids=snake_case_ )
_UpperCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase ( self : str , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : int , snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : List[str] , ):
_UpperCAmelCase = True
_UpperCAmelCase = NezhaModel(snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )
_UpperCAmelCase = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , encoder_hidden_states=snake_case_ , )
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase ( self : Any , snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : int , snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Dict ):
_UpperCAmelCase = NezhaForMaskedLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Optional[Any] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : int , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : List[str] , snake_case_ : List[str] ):
_UpperCAmelCase = NezhaForNextSentencePrediction(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowercase ( self : Optional[int] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : int , snake_case_ : Dict ):
_UpperCAmelCase = NezhaForPreTraining(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , next_sentence_label=snake_case_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowercase ( self : int , snake_case_ : Dict , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : int ):
_UpperCAmelCase = NezhaForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self : Optional[int] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : str , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Optional[int] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = NezhaForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Union[str, Any] , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Optional[Any] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = NezhaForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : str , snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : Any ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = NezhaForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def lowercase ( self : str ):
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=3_7 )
def lowercase ( self : str ):
self.config_tester.run_common_tests()
def lowercase ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*snake_case_ )
def lowercase ( self : int ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        encoder_hidden_states = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def lowercase ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case_ )
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case_ )
def lowercase ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case_ )
def lowercase ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case_ )
def lowercase ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@slow
def lowercase ( self : List[Any] ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = NezhaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@slow
@require_torch_gpu
def lowercase ( self : Tuple ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
_UpperCAmelCase = True
_UpperCAmelCase = model_class(config=snake_case_ )
_UpperCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ )
_UpperCAmelCase = torch.jit.trace(
snake_case_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case_ , os.path.join(snake_case_ , "bert.pt" ) )
_UpperCAmelCase = torch.jit.load(os.path.join(snake_case_ , "bert.pt" ) , map_location=snake_case_ )
loaded(inputs_dict["input_ids"].to(snake_case_ ) , inputs_dict["attention_mask"].to(snake_case_ ) )
@require_torch
class A_ ( unittest.TestCase ):
@slow
def lowercase ( self : Optional[int] ):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
@slow
def lowercase ( self : Optional[int] ):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 2_1_1_2_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 119 |
'''simple docstring'''
from math import ceil
def assert_device_map( device_map , num_blocks ):
    '''simple docstring'''
    blocks = list(range(0 , num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks ) )
def get_device_map( n_layers , devices ):
    '''simple docstring'''
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
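# Minimal usage sketch for the two helpers above: `get_device_map` slices the
# layer indices into ceil(n_layers / len(devices)) consecutive blocks, one
# block per device, and `assert_device_map` validates such a mapping.
#
#     >>> get_device_map(12, [0, 1])
#     {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
#     >>> assert_device_map(get_device_map(12, [0, 1]), 12)  # passes silently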
| 119 | 1 |
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy( saved_model_path , strict , opset ):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
        onnx_opsets = json.load(f )['opsets']
    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path , 'rb' ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + '\n'.join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(f'''Found the following incompatible ops for the opset {opset}:''' )
        print(*incompatible_ops , sep='\n' )
    else:
        print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 447 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_model_doc_toc( model_doc ):
    '''simple docstring'''
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F"{duplicate_key} is present several times in the documentation table of content at "
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
    # Sort
    return sorted(new_doc, key=lambda s : s["title"].lower() )
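# Behaviour sketch on a tiny table-of-contents list (hypothetical entries):
# duplicated "local" keys that share a single title collapse to one entry, and
# the result comes back sorted by title.
#
#     >>> clean_model_doc_toc([
#     ...     {"local": "model_doc/bert", "title": "BERT"},
#     ...     {"local": "model_doc/bert", "title": "BERT"},
#     ...     {"local": "model_doc/albert", "title": "ALBERT"},
#     ... ])
#     [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]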
def check_model_doc( overwrite=False ):
    '''simple docstring'''
    with open(PATH_TO_TOC, encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['''sections'''] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC, '''w''', encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 640 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 1_00 * 2**20, 9_00 * 2**20] )
def _a ( monkeypatch , dataset_size , input_in_memory_max_size ):
    """simple docstring"""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
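# Direct usage sketch of the helper under test (sizes in bytes, chosen for
# illustration; a cap of 0 disables the in-memory path entirely):
#
#     >>> import datasets.config
#     >>> datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20  # 500 MiB cap
#     >>> is_small_dataset(400 * 2**20)
#     True
#     >>> is_small_dataset(600 * 2**20)
#     False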
| 429 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for attribute in key.split('''.''' ):
lowercase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
lowercase__ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
lowercase__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
else:
lowercase__ = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.feature_extractor
lowercase__ = hf_model.adapter
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
lowercase__ = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
lowercase__ = mapped_key.replace('''*''' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
lowercase__ = '''weight_g'''
elif "weight_v" in name:
lowercase__ = '''weight_v'''
elif "bias" in name:
lowercase__ = '''bias'''
elif "weight" in name:
lowercase__ = '''weight'''
else:
lowercase__ = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(f'Unused weights: {unused_weights}' )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = full_name.split('''conv_layers.''' )[-1]
lowercase__ = name.split('''.''' )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowercase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowercase__ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowercase__ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowercase__ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = full_name.split('''adaptor.''' )[-1]
lowercase__ = name.split('''.''' )
if items[1].isdigit():
lowercase__ = int(items[1] )
else:
lowercase__ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
lowercase__ = value
logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
lowercase__ = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
lowercase__ = value
logger.info(f'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
lowercase__ = value
logger.info(f'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
lowercase__ = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
lowercase__ = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def make_linear_from_emb( emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
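# Usage sketch (hypothetical sizes): builds a bias-free projection whose weight
# tensor is shared with an existing embedding, the usual way a tied lm_head is
# constructed in these conversion scripts.
#
#     >>> emb = nn.Embedding(250_004, 1_024)
#     >>> lm_head = make_linear_from_emb(emb)
#     >>> lm_head.weight.data.shape
#     torch.Size([250004, 1024])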
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase__ = WavaVecaConfig.from_pretrained(
SCREAMING_SNAKE_CASE , add_adapter=SCREAMING_SNAKE_CASE , adapter_stride=SCREAMING_SNAKE_CASE , adapter_kernel_size=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , output_hidden_size=SCREAMING_SNAKE_CASE , )
lowercase__ = MBartConfig.from_pretrained(SCREAMING_SNAKE_CASE )
# load model
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
lowercase__ = model[0].eval()
# load feature extractor
lowercase__ = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE )
# set weights for wav2vec2 encoder
lowercase__ = WavaVecaModel(SCREAMING_SNAKE_CASE )
recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE )
# load decoder weights
lowercase__ = MBartForCausalLM(SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE )
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
lowercase__ = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE )
lowercase__ = False
lowercase__ = MBartaaTokenizer(SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ = hf_wavavec.config.to_dict()
lowercase__ = tokenizer.pad_token_id
lowercase__ = tokenizer.bos_token_id
lowercase__ = tokenizer.eos_token_id
lowercase__ = '''mbart50'''
lowercase__ = '''wav2vec2'''
lowercase__ = tokenizer.eos_token_id
lowercase__ = 25_00_04
lowercase__ = tokenizer.eos_token_id
lowercase__ = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=25_0004, type=int, help='`decoder_start_token_id` of model config')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 429 | 1 |
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit : int = 100_0000 , n_limit : int = 10 ):
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
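# How the tally works (a short derivation of the loops above): a square lamina
# with outer side `outer_width` and a centred square hole of side `hole_width`
# uses outer_width**2 - hole_width**2 tiles; the two sides must share parity
# for the border to have uniform thickness, hence the `% 2` adjustment and the
# step of 2. `count` maps each reachable tile total t <= t_limit to the number
# of lamina shapes producing it, and the final sum keeps every t realisable in
# between 1 and n_limit distinct ways.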
if __name__ == "__main__":
print(F"""{solution() = }""")
| 20 |
from __future__ import annotations
from collections import namedtuple
def __lowerCamelCase ( voltage , current , power ) -> tuple:
    result = namedtuple("result" , "name value" )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError("Only one argument must be 0" )
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system" )
    elif voltage == 0:
        return result("voltage" , power / current )
    elif current == 0:
        return result("current" , power / voltage )
    elif power == 0:
        return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
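# Usage sketch: exactly one of the three quantities must be zero, and the
# helper solves for that one, e.g.
#
#     >>> __lowerCamelCase(voltage=0, current=2, power=5)
#     result(name='voltage', value=2.5)
#     >>> __lowerCamelCase(voltage=2, current=2, power=0)
#     result(name='power', value=4.0)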
| 684 | 0 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main ( ):
    """simple docstring"""
    parser = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""" )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 717 |
def jaccard_similarity ( set_a , set_b , alternative_union=False ):
    """simple docstring"""
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
lowerCamelCase__ = {"""a""", """b""", """c""", """d""", """e"""}
lowerCamelCase__ = {"""c""", """d""", """e""", """f""", """h""", """i"""}
print(jaccard_similarity(set_a, set_b))
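    # For the sample sets above, the intersection is {"c", "d", "e"} (3 items)
    # and the union has 8 items, so this prints 0.375.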
| 547 | 0 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__lowercase : Optional[Any] =logging.get_logger(__name__)
class A ( VideoMAEImageProcessor ):
def __init__( self: int , *_lowerCAmelCase: Optional[Any] , **_lowerCAmelCase: Dict ) -> None:
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , _lowerCAmelCase , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
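# Migration sketch: the deprecated class above is a thin alias, so callers can
# switch to the image processor without behaviour changes (checkpoint name for
# illustration):
#
#     from transformers import VideoMAEImageProcessor
#     processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")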
| 54 |
def calc_profit( profit , weight , max_weight ):
    '''simple docstring'''
    if len(profit ) != len(weight ):
        raise ValueError("The length of profit and weight must be same." )
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero." )
    if any(p < 0 for p in profit ):
        raise ValueError("Profit can not be negative." )
    if any(w < 0 for w in weight ):
        raise ValueError("Weight can not be negative." )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit , weight )]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight )
    # declaring useful variables
    length = len(sorted_profit_by_weight )
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight )
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
    profit = [int(x) for x in input("""Input profits separated by spaces: """).split()]
    weight = [int(x) for x in input("""Input weights separated by spaces: """).split()]
    max_weight = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
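# Worked example (classic fractional-knapsack numbers, for illustration):
#
#     >>> calc_profit([60, 100, 120], [10, 20, 30], 50)
#     240.0
#
# Items are taken greedily by profit/weight ratio (6 > 5 > 4); the third item
# only fits partially, contributing (50 - 30) / 30 * 120 = 80 to the total.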
| 54 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB )
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(spm_model ) )]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB , save_dir / VOCAB_FILES_NAMES["spm_file"] )
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self ):
snake_case_ = """<pad>"""
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def _lowercase ( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 10_01 )
def _lowercase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def _lowercase ( self ):
snake_case_ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
snake_case_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [2_89, 50, 14, 1_74, 3_86] , )
snake_case_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
snake_case_ = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(_lowercase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
snake_case_ = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def _lowercase ( self ):
snake_case_ = {"""input_ids""": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
snake_case = """valhalla/s2t_mustc_multilinguial_medium"""
snake_case = """C'est trop cool"""
snake_case = """Esto es genial"""
@classmethod
    def setUpClass( cls ):
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def _lowercase ( self ):
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def _lowercase ( self ):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
def _lowercase ( self ):
        self.assertIn(ES_CODE , self.tokenizer.all_special_ids )
        generated_ids = [ES_CODE, 4, 16_01, 47, 76_47, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_spanish = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_spanish )
        self.assertNotIn(self.tokenizer.eos_token , result )
def _lowercase ( self ):
snake_case_ = """fr"""
snake_case_ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , _lowercase )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def _lowercase ( self ):
snake_case_ = """fr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
snake_case_ = """es"""
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 719 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively, assuming A is strictly diagonally dominant."""
    rowsa, colsa = coefficient_matrix.shape
    rowsb, colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"
        raise ValueError(msg)
    if colsb != 1:
        msg = f"Constant matrix must be nx1 but received {rowsb}x{colsb}"
        raise ValueError(msg)
    if rowsa != rowsb:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rowsa}x{colsa} and {rowsb}x{colsb}"
        )
        raise ValueError(msg)
    if len(init_val) != rowsa:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rowsa}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if any diagonal entry does not strictly dominate the rest of its row."""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
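
    # A minimal worked example (values chosen here for illustration): solve the
    # strictly diagonally dominant system 4x + y = 1, x + 3y = 2.
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[1.0], [2.0]])
    print(jacobi_iteration_method(coefficient, constant, [0, 0], 25))
    # converges toward the exact solution x = 1/11 ~ 0.0909, y = 7/11 ~ 0.6364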
| 420 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
    import tensorflow as tf
__A : Dict = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    """Benchmark arguments with TensorFlow-specific device and strategy handling."""

    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        """Sanitize deprecated `no_*` flags before delegating to BenchmarkArguments."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"}, )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."}, )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False, metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        }, )
    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
    @property
    def is_tpu(self):
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self):
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 16 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV, mirroring the `pandas.read_csv` keyword arguments."""
UpperCAmelCase_ : str =","
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : Optional[Union[int, List[int], str]] ="infer"
UpperCAmelCase_ : Optional[List[str]] =None
UpperCAmelCase_ : Optional[List[str]] =None
UpperCAmelCase_ : Optional[Union[int, str, List[int], List[str]]] =None
UpperCAmelCase_ : Optional[Union[List[int], List[str]]] =None
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : Optional[Literal["c", "python", "pyarrow"]] =None
UpperCAmelCase_ : Dict[Union[int, str], Callable[[Any], Any]] =None
UpperCAmelCase_ : Optional[list] =None
UpperCAmelCase_ : Optional[list] =None
UpperCAmelCase_ : bool =False
UpperCAmelCase_ : Optional[Union[int, List[int]]] =None
UpperCAmelCase_ : Optional[int] =None
UpperCAmelCase_ : Optional[Union[str, List[str]]] =None
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : bool =False
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : str ="."
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : str ='"'
UpperCAmelCase_ : int =0
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : int =0
UpperCAmelCase_ : bool =True
UpperCAmelCase_ : bool =False
UpperCAmelCase_ : Optional[str] =None
UpperCAmelCase_ : int =10_000
UpperCAmelCase_ : Optional[datasets.Features] =None
UpperCAmelCase_ : Optional[str] ="strict"
UpperCAmelCase_ : Literal["error", "warn", "skip"] ="error"
UpperCAmelCase_ : Optional[str] =None
    def __post_init__(self):
        """`delimiter` and `column_names` are aliases for `sep` and `names`."""
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        """Collect the config values that can be forwarded to `pandas.read_csv`."""
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    """CSV dataset builder based on `pandas.read_csv` and Arrow tables."""

    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
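
# Usage sketch (via the `datasets` loading machinery, which picks up this builder):
#     from datasets import load_dataset
#     ds = load_dataset("csv", data_files={"train": "my_data.csv"}, sep=";")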
| 243 | 0 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left:right + 1] by divide and conquer."""
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
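
    # Minimal demonstration (values chosen here for illustration):
    nums = [5, 3, 8, 1, 9, 2]
    assert find_max(nums, 0, len(nums) - 1) == 9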
| 720 |
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt each character with a fresh random key; returns (cipher, key)."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 3_0_0)
            encrypted = (i + k) * k
            cipher.append(encrypted)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert `encrypt` using the per-character keys: p = (c - k^2) / k."""
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
UpperCAmelCase_ , UpperCAmelCase_ : Any = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
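    # Round-trip check: decryption must invert encryption for the same key list.
    assert Onepad().decrypt(c, k) == "Hello"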
| 443 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the DiNAT (Dilated Neighborhood Attention Transformer) backbone."""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
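
# Usage sketch (the defaults above reproduce the "mini" variant):
#     config = DinatConfig()
#     config.hidden_size  # -> 512, i.e. embed_dim * 2 ** (len(depths) - 1)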
| 92 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Map an (alpha, sigma) pair to the corresponding diffusion time t."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object:
    """Plain attribute container used as a stand-in config."""

    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])
    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]
    if "resnets" in new_layer:
        new_string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
    string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    main(args)
| 39 | 0 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Fractional (greedy) knapsack: take items by best profit/weight ratio."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
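
# Worked example: profit=[10, 9], weight=[5, 3], max_weight=6 -> item 1
# (ratio 3.0) is taken fully (gain 9), then 3 of item 0's 5 weight units
# fit (gain 10 * 3/5 = 6), so calc_profit returns 15.0.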
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
SCREAMING_SNAKE_CASE_:Union[str, Any] = [int(x) for x in input("""Input profits separated by spaces: """).split()]
SCREAMING_SNAKE_CASE_:Optional[int] = [int(x) for x in input("""Input weights separated by spaces: """).split()]
SCREAMING_SNAKE_CASE_:int = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight) | 704 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
    """Mel-spectrogram feature extractor for CLAP audio inputs."""

    model_input_names = ["input_features", "is_longer"]
    def __init__(
        self,
        feature_size=64,
        sampling_rate=4_8000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 1_4000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk", )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", )
    def to_dict(self):
        """Serialize, dropping the (large, recomputable) mel filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform, mel_filters=None):
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB", )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="bilinear", align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform, max_length, truncation, padding):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(
        self,
        raw_speech,
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
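
# Usage sketch (assuming a 48 kHz mono waveform as a 1-D numpy array; with the
# default "fusion" truncation the features carry 4 stacked mel channels):
#     extractor = ClapFeatureExtractor()
#     feats = extractor(waveform, sampling_rate=48_000, return_tensors="np")
#     feats["input_features"].shape  # (batch, 4, frames, 64)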
| 520 | 0 |
"""simple docstring"""
def mf_knapsack(i, wt, val, j):
    """Memoized (memory-function) knapsack over the global dp table `f`."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]
def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns (optimal value, full dp table)."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve 0/1 knapsack and also reconstruct one optimal subset of items."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples")
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = [3, 2, 4, 4]
SCREAMING_SNAKE_CASE = [4, 3, 2, 3]
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = 6
SCREAMING_SNAKE_CASE = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 554 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note tokens, used by the spectrogram diffusion pipeline."""

    @register_to_config
    def __init__(self, max_length, vocab_size, d_model, dropout_rate, num_layers, num_heads, d_kv, d_ff, feed_forward_proj, is_decoder=False, ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
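
# Usage sketch (hypothetical hyper-parameters; the encoder returns a tuple of
# hidden states and the unchanged input mask):
#     enc = SpectrogramNotesEncoder(max_length=2048, vocab_size=1536, d_model=768,
#                                   dropout_rate=0.1, num_layers=12, num_heads=12,
#                                   d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu")
#     tokens = torch.zeros((1, 16), dtype=torch.long)
#     mask = torch.ones((1, 16), dtype=torch.long)
#     hidden, out_mask = enc(tokens, mask)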
| 180 | 0 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
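
# These re-exports allow, e.g. (module path assumed from the file layout):
#     from diffusers.pipelines.vq_diffusion import VQDiffusionPipeline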
| 708 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[int] = "examples/"
REPLACE_PATTERNS = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def update_version_in_file(fname: str, version: str, pattern: str):
    """Rewrite the version in `fname` using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version: str):
    """Update the pinned version in all maintained example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version: str, patch: bool = False):
    """Push the new version to every tracked file."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point the model-list links in the README at the stable docs."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc", )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowerCAmelCase : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
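
# Typical invocations (from the repository root, per the argparse flags above):
#     python utils/release.py                 # prepare a minor release
#     python utils/release.py --patch         # prepare a patch release
#     python utils/release.py --post_release  # bump back to a .dev0 version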
| 654 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
    from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : int = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : List[str] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained('bert-base-cased')
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 265 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self):
        super().__init__(None, None)

    def __bool__(self):
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and automatic resizing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75):
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)
    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to place the pair at `ind`; return False if the slot is taken by another key."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False
    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit
    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)
    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
def __delitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
raise KeyError(lowercase__ )
if item is _deleted:
continue
if item.key == key:
__lowercase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowercase__ )
def __len__( self : Optional[int] ):
return self._len
def __iter__( self : str ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any] ):
__lowercase = ''' ,'''.join(
F"{item.key}: {item.val}" for item in self._buckets if item )
return F"HashMap({val_string})"
| 41 | 0 |
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int, power: int) -> list[str]:
    """simple docstring"""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
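
# Worked example (added): p_series(5, 2) returns
# ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'] -- the first five terms of the 2-series.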
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("""Enter the last number (nth term) of the P-Series"""))
    power = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 708 |
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """simple docstring"""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
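
# Worked example (added): solution(3, 7, 8) == 2, because 2/5 is the largest
# fraction below 3/7 whose denominator is at most 8.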
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
| 418 | 0 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''

    model_input_names = ["audio_values", "audio_mask"]
    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney").T
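    # Note (added): with the defaults above, hop_length = 44100 // 86 = 512 samples,
    # i.e. roughly 86 mel-spectrogram frames per second of audio.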
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
def __call__( self : List[str] , A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , A : Optional[Union[str, TensorType]] = None , A : Optional[bool] = True , A : Optional[int] = None , A : bool = False , A : bool = False , **A : List[Any] , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_UpperCAmelCase : Tuple = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_UpperCAmelCase : Union[str, Any] = is_batched_numpy or (
isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A , np.ndarray ):
_UpperCAmelCase : str = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase : Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase : Union[str, Any] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_UpperCAmelCase : Union[str, Any] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , A ):
_UpperCAmelCase : int = [np.asarray(A , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_UpperCAmelCase : Dict = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_UpperCAmelCase : int = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
_UpperCAmelCase : Optional[int] = np.array(A ).astype(np.floataa )
# convert into correct format for padding
_UpperCAmelCase : Any = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_UpperCAmelCase : Union[str, Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
_UpperCAmelCase : List[str] = padded_audio_features * self.padding_value
for i in range(len(A ) ):
_UpperCAmelCase : Optional[Any] = audio_features[i]
_UpperCAmelCase : Optional[Any] = feature
# return as BatchFeature
if return_attention_mask:
_UpperCAmelCase : Union[str, Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
_UpperCAmelCase : Tuple = {"audio_values": padded_audio_features}
_UpperCAmelCase : Optional[Any] = BatchFeature(data=A , tensor_type=A )
return encoded_inputs
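
# Usage sketch (added; the one-second 440 Hz tone is synthetic and purely illustrative):
#     extractor = TvltFeatureExtractor()
#     audio = np.sin(2 * np.pi * 440 * np.arange(44100) / 44100).astype(np.float32)
#     batch = extractor(audio, sampling_rate=44100, return_tensors="np")
#     batch["audio_values"].shape  # (1, 1, padded_time_len, 128) given the padding logic above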
| 244 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    '''simple docstring'''
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )
        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)
        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)
        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()
        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()
        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        lm_logits_2, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    '''simple docstring'''
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def _A ( self : Union[str, Any] ):
_UpperCAmelCase : str = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
_UpperCAmelCase : int = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_UpperCAmelCase : List[str] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_UpperCAmelCase : List[Any] = model.generate(A , max_length=200 , do_sample=A )
self.assertListEqual(output_ids[0].numpy().tolist() , A )
| 244 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
def _a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ):
lowerCamelCase__ =StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
lowerCamelCase__ =sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
sd_pipe.set_scheduler("sample_euler" )
lowerCamelCase__ ="A painting of a squirrel eating a burger"
lowerCamelCase__ =torch.manual_seed(0 )
lowerCamelCase__ =sd_pipe([prompt] , generator=_lowerCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
lowerCamelCase__ =output.images
lowerCamelCase__ =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ =np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ):
lowerCamelCase__ =StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowerCamelCase__ =sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
sd_pipe.set_scheduler("sample_euler" )
lowerCamelCase__ ="A painting of a squirrel eating a burger"
lowerCamelCase__ =torch.manual_seed(0 )
lowerCamelCase__ =sd_pipe([prompt] , generator=_lowerCamelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
lowerCamelCase__ =output.images
lowerCamelCase__ =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ =np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _a ( self ):
lowerCamelCase__ =StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowerCamelCase__ =sd_pipe.to(_lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCamelCase )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
lowerCamelCase__ ="A painting of a squirrel eating a burger"
lowerCamelCase__ =torch.manual_seed(0 )
lowerCamelCase__ =sd_pipe(
[prompt] , generator=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=_lowerCamelCase , )
lowerCamelCase__ =output.images
lowerCamelCase__ =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase__ =np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 132 |
"""simple docstring"""
import sys
def matrix_chain_order(array):
    """simple docstring"""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optiomal_solution(optimal_solution, i, j):
    """simple docstring"""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    """simple docstring"""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
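
# Worked check (added): for dims [30, 35, 15, 5, 10, 20, 25] the minimum cost is 15125
# scalar multiplications and an optimal parenthesization is ((A1(A2A3))((A4A5)A6)),
# which is exactly what main() prints.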
| 132 | 1 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Tuple , lowerCAmelCase: Union[str, Any] ) -> Optional[Any]:
_UpperCAmelCase : List[Any] = MobileViTVaConfig()
_UpperCAmelCase : Tuple = False
# dataset
if task_name.startswith("imagenet1k_" ):
_UpperCAmelCase : List[str] = 1000
if int(task_name.strip().split("_" )[-1] ) == 384:
_UpperCAmelCase : Union[str, Any] = 384
else:
_UpperCAmelCase : Any = 256
_UpperCAmelCase : Optional[int] = "imagenet-1k-id2label.json"
elif task_name.startswith("imagenet21k_to_1k_" ):
_UpperCAmelCase : Tuple = 2_1000
if int(task_name.strip().split("_" )[-1] ) == 384:
_UpperCAmelCase : Tuple = 384
else:
_UpperCAmelCase : Any = 256
_UpperCAmelCase : List[Any] = "imagenet-22k-id2label.json"
elif task_name.startswith("ade20k_" ):
_UpperCAmelCase : Dict = 151
_UpperCAmelCase : Optional[Any] = 512
_UpperCAmelCase : List[str] = "ade20k-id2label.json"
_UpperCAmelCase : Dict = True
elif task_name.startswith("voc_" ):
_UpperCAmelCase : Any = 21
_UpperCAmelCase : Optional[int] = 512
_UpperCAmelCase : List[str] = "pascal-voc-id2label.json"
_UpperCAmelCase : int = True
# orig_config
_UpperCAmelCase : Optional[Any] = load_orig_config_file(lowerCAmelCase )
assert getattr(lowerCAmelCase , "model.classification.name" , -1 ) == "mobilevit_v2", "Invalid model"
_UpperCAmelCase : Optional[int] = getattr(lowerCAmelCase , "model.classification.mitv2.width_multiplier" , 1.0 )
assert (
getattr(lowerCAmelCase , "model.classification.mitv2.attn_norm_layer" , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_UpperCAmelCase : List[Any] = getattr(lowerCAmelCase , "model.classification.activation.name" , "swish" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_UpperCAmelCase : List[str] = getattr(lowerCAmelCase , "model.segmentation.output_stride" , 16 )
if "_deeplabv3" in task_name:
_UpperCAmelCase : str = getattr(lowerCAmelCase , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36] )
_UpperCAmelCase : Dict = getattr(lowerCAmelCase , "model.segmentation.deeplabv3.aspp_out_channels" , 512 )
_UpperCAmelCase : Optional[Any] = getattr(lowerCAmelCase , "model.segmentation.deeplabv3.aspp_dropout" , 0.1 )
# id2label
_UpperCAmelCase : str = "huggingface/label-files"
_UpperCAmelCase : List[str] = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : str = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase : List[str] = idalabel
_UpperCAmelCase : int = {v: k for k, v in idalabel.items()}
return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
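
# e.g. rename_key(sd, "conv_1.convolution.weight", "mobilevitv2.conv_stem.convolution.weight")
# pops the old entry and re-inserts it under the new name (illustrative key strings, added).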
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Tuple=False ) -> Any:
if base_model:
_UpperCAmelCase : Optional[int] = ""
else:
_UpperCAmelCase : Any = "mobilevitv2."
_UpperCAmelCase : List[Any] = []
for k in state_dict.keys():
if k[:8] == "encoder.":
_UpperCAmelCase : Union[str, Any] = k[8:]
else:
_UpperCAmelCase : Any = k
if ".block." in k:
_UpperCAmelCase : List[str] = k_new.replace(".block." , "." )
if ".conv." in k:
_UpperCAmelCase : int = k_new.replace(".conv." , ".convolution." )
if ".norm." in k:
_UpperCAmelCase : List[Any] = k_new.replace(".norm." , ".normalization." )
if "conv_1." in k:
_UpperCAmelCase : List[str] = k_new.replace("conv_1." , F'{model_prefix}conv_stem.' )
for i in [1, 2]:
if F'layer_{i}.' in k:
_UpperCAmelCase : Any = k_new.replace(F'layer_{i}.' , F'{model_prefix}encoder.layer.{i-1}.layer.' )
if ".exp_1x1." in k:
_UpperCAmelCase : Optional[Any] = k_new.replace(".exp_1x1." , ".expand_1x1." )
if ".red_1x1." in k:
_UpperCAmelCase : List[Any] = k_new.replace(".red_1x1." , ".reduce_1x1." )
for i in [3, 4, 5]:
if F'layer_{i}.0.' in k:
_UpperCAmelCase : Optional[Any] = k_new.replace(F'layer_{i}.0.' , F'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' )
if F'layer_{i}.1.local_rep.0.' in k:
_UpperCAmelCase : List[str] = k_new.replace(F'layer_{i}.1.local_rep.0.' , F'{model_prefix}encoder.layer.{i-1}.conv_kxk.' )
if F'layer_{i}.1.local_rep.1.' in k:
_UpperCAmelCase : int = k_new.replace(F'layer_{i}.1.local_rep.1.' , F'{model_prefix}encoder.layer.{i-1}.conv_1x1.' )
for i in [3, 4, 5]:
if i == 3:
_UpperCAmelCase : Optional[int] = [0, 1]
elif i == 4:
_UpperCAmelCase : Union[str, Any] = [0, 1, 2, 3]
elif i == 5:
_UpperCAmelCase : Optional[int] = [0, 1, 2]
for j in j_in:
if F'layer_{i}.1.global_rep.{j}.' in k:
_UpperCAmelCase : int = k_new.replace(
F'layer_{i}.1.global_rep.{j}.' , F'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' )
if F'layer_{i}.1.global_rep.{j+1}.' in k:
_UpperCAmelCase : Any = k_new.replace(
F'layer_{i}.1.global_rep.{j+1}.' , F'{model_prefix}encoder.layer.{i-1}.layernorm.' )
if F'layer_{i}.1.conv_proj.' in k:
_UpperCAmelCase : str = k_new.replace(F'layer_{i}.1.conv_proj.' , F'{model_prefix}encoder.layer.{i-1}.conv_projection.' )
if "pre_norm_attn.0." in k:
_UpperCAmelCase : int = k_new.replace("pre_norm_attn.0." , "layernorm_before." )
if "pre_norm_attn.1." in k:
_UpperCAmelCase : Tuple = k_new.replace("pre_norm_attn.1." , "attention." )
if "pre_norm_ffn.0." in k:
_UpperCAmelCase : Tuple = k_new.replace("pre_norm_ffn.0." , "layernorm_after." )
if "pre_norm_ffn.1." in k:
_UpperCAmelCase : str = k_new.replace("pre_norm_ffn.1." , "ffn.conv1." )
if "pre_norm_ffn.3." in k:
_UpperCAmelCase : Any = k_new.replace("pre_norm_ffn.3." , "ffn.conv2." )
if "classifier.1." in k:
_UpperCAmelCase : Any = k_new.replace("classifier.1." , "classifier." )
if "seg_head." in k:
_UpperCAmelCase : Tuple = k_new.replace("seg_head." , "segmentation_head." )
if ".aspp_layer." in k:
_UpperCAmelCase : Optional[Any] = k_new.replace(".aspp_layer." , "." )
if ".aspp_pool." in k:
_UpperCAmelCase : Optional[Any] = k_new.replace(".aspp_pool." , "." )
rename_keys.append((k, k_new) )
return rename_keys
def remove_unused_keys(state_dict):
    """remove unused keys (e.g.: seg_head.aux_head)"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[str] , lowerCAmelCase: Union[str, Any] , lowerCAmelCase: str , lowerCAmelCase: Optional[Any] ) -> List[str]:
_UpperCAmelCase : Optional[Any] = get_mobilevitva_config(lowerCAmelCase , lowerCAmelCase )
# load original state_dict
_UpperCAmelCase : Optional[Any] = torch.load(lowerCAmelCase , map_location="cpu" )
# load huggingface model
if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
_UpperCAmelCase : List[str] = MobileViTVaForSemanticSegmentation(lowerCAmelCase ).eval()
_UpperCAmelCase : str = False
else:
_UpperCAmelCase : Optional[Any] = MobileViTVaForImageClassification(lowerCAmelCase ).eval()
_UpperCAmelCase : Any = False
# remove and rename some keys of load the original model
_UpperCAmelCase : int = checkpoint
remove_unused_keys(lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = create_rename_keys(lowerCAmelCase , base_model=lowerCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# load modified state_dict
model.load_state_dict(lowerCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
_UpperCAmelCase : int = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_UpperCAmelCase : Any = image_processor(images=prepare_img() , return_tensors="pt" )
_UpperCAmelCase : Tuple = model(**lowerCAmelCase )
# verify classification model
if task_name.startswith("imagenet" ):
_UpperCAmelCase : Any = outputs.logits
_UpperCAmelCase : Dict = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
_UpperCAmelCase : List[Any] = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
assert torch.allclose(logits[0, :3] , lowerCAmelCase , atol=1E-4 )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
print(F'Saving model {task_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
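
# Example invocation (added; file paths are placeholders and the script name is an assumption):
#   python convert_mlcvnets_to_pytorch.py --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0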
| 300 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """
        Calculate y[n]
        """
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
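
# Minimal usage sketch (added): any object with a compatible `process` method satisfies
# the FilterType protocol; an identity filter yields a flat 0 dB magnitude response.
if __name__ == "__main__":
    class PassThrough:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(PassThrough(), samplerate=48000)
    show_phase_response(PassThrough(), samplerate=48000)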
| 300 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: list):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_try = min(end - start, max_word_len)
            for i in range(max_try, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, 'r', encoding='utf-8') as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, 'w', encoding='utf-8') as f:
        data = [json.dumps(ref) + '\n' for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
_snake_case = parser.parse_args()
main(args)
| 170 |
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
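
# Worked example (added): climb_stairs(4) == 5, counting
# 1+1+1+1, 1+1+2, 1+2+1, 2+1+1 and 2+2.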
if __name__ == "__main__":
import doctest
doctest.testmod()
| 170 | 1 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__UpperCamelCase : Optional[int] = pytest.mark.integration
@require_faiss
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(_snake_case ) for x in np.arange(30 ).tolist()]} )
return dset
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
lowerCAmelCase = dset.map(
lambda _snake_case , _snake_case : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_snake_case , keep_in_memory=_snake_case )
lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_snake_case , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
lowerCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCAmelCase = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=_snake_case )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertRaises(_snake_case , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
self.assertRaises(_snake_case , index.search_batch , queries[0] )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCAmelCase = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_snake_case ):
lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = faiss.IndexFlat(5 )
lowerCAmelCase = FaissIndex(custom_index=_snake_case )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
index.save(tmp_file.name )
lowerCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ):
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCAmelCase = 'index.faiss'
lowerCAmelCase = F'mock://{index_name}'
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = Elasticsearch()
lowerCAmelCase = {'acknowledged': True}
lowerCAmelCase = ElasticSearchIndex(es_client=_snake_case )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
# batched queries with timeout
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case , request_timeout=30 )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
| 4 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
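
# Usage note (added): with this conftest active, test reports are produced via e.g.
#   pytest tests/ --make-reports=run_tests
# (the flag name comes from the getoption call above; report writing lives in
# diffusers.utils.testing_utils.pytest_terminal_summary_main).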
| 172 | 0 |
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("""fusing/vqgan-dummy""", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["""missing_keys"""]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("""fusing/vqgan-dummy""")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 709 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
a = logging.get_logger(__name__)
def lowercase (snake_case__ : bool , snake_case__ : bool ) -> Tuple:
'''simple docstring'''
def run_func(snake_case__ : Any ):
@wraps(snake_case__ )
def run_in_eager_mode(*snake_case__ : Optional[Any] , **snake_case__ : int ):
return func(*snake_case__ , **snake_case__ )
@wraps(snake_case__ )
@tf.function(experimental_compile=snake_case__ )
def run_in_graph_mode(*snake_case__ : int , **snake_case__ : Tuple ):
return func(*snake_case__ , **snake_case__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def random_input_ids(batch_size : int , sequence_length : int , vocab_size : int ) -> ["tf.Tensor"]:
    '''simple docstring'''
    rng = random.Random()
    values = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values , shape=(batch_size, sequence_length) , dtype=tf.int32 )
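# Quick check of the helper above (assumption: TensorFlow is installed):
#
#   ids = random_input_ids(batch_size=2, sequence_length=8, vocab_size=100)
#   print(ids.shape)  # (2, 8), values in [0, 99]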
class TensorFlowBenchmark( Benchmark ):
    args : TensorFlowBenchmarkArguments
    configs : PretrainedConfig
    framework : str = "TensorFlow"
    @property
    def framework_version ( self : Optional[int] ):
        return tf.__version__
    def _inference_speed ( self : Optional[Any] , model_name : str , batch_size : int , sequence_length : int ):
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_inference )
    def _train_speed ( self : str , model_name : str , batch_size : int , sequence_length : int ):
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_train )
    def _inference_memory ( self : str , model_name : str , batch_size : int , sequence_length : int ):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_inference )
    def _train_memory ( self : int , model_name : str , batch_size : int , sequence_length : int ):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_train )
    def _prepare_inference_func ( self : List[str] , model_name : str , batch_size : int , sequence_length : int ):
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config , """architectures""" )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""" , fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_forward():
            return model(input_ids , decoder_input_ids=input_ids , training=False )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_forward():
            return model(input_ids , training=False )
        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func ( self : Optional[int] , model_name : str , batch_size : int , sequence_length : int ):
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
        if self.args.fp16:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config , """architectures""" )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""" , fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_train():
            loss = model(input_ids , decoder_input_ids=input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_train():
            loss = model(input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed ( self : Optional[int] , func : List[str] ):
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run the model 5 extra times to stabilize compilation for TPU/XLA
                    logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
                    timeit.repeat(func , repeat=1 , number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func , repeat=self.args.repeat , number=10 , )
                return min(runtimes ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
    def _measure_memory ( self : Optional[Any] , func : Callable[[], None] ):
        logger.info(
            """Note that TensorFlow allocates more memory than """
            """it might need to speed up computation. """
            """The memory reported here corresponds to the memory """
            """reported by `nvidia-smi`, which can vary depending """
            """on total available memory on the GPU that is used.""" )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
                            """ consumption line by line.""" )
                    trace = start_memory_tracing("""transformers""" )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
                        """ with `args.memory=False`""" )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            """py3nvml not installed, we won't log GPU memory usage. """
                            """Install py3nvml (pip install py3nvml) to log information about GPU.""" )
                        memory = """N/A"""
                    else:
                        logger.info(
                            """Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
                            """ running on the same GPU.""" )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle )
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            """When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
                            """ TensorFlow.""" )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func )
                        memory = Memory(memory_bytes ) if isinstance(memory_bytes , int ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace )
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
                return "N/A", None
| 529 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm'''] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm_fast'''] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
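# Usage sketch for the lazy module above (assumption: sentencepiece is installed);
# the heavy import only happens when the attribute is first accessed:
#
#   from transformers import LayoutXLMProcessor
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")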
| 499 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowerCamelCase ( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        image_processor_map = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
"""do_convert_rgb""": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
    def test_save_load_pretrained_default( self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ChineseCLIPImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ChineseCLIPImageProcessor )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """Alexandra,T-shirt的价格是15便士。"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
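# The call pattern these tests exercise, as a standalone sketch (assumption: any
# Chinese-CLIP hub checkpoint works here; network access required):
#
#   from transformers import ChineseCLIPProcessor
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
#   # -> input_ids, token_type_ids, attention_mask, pixel_values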
| 499 | 1 |
'''simple docstring'''
import os
def solution():
    """simple docstring"""
    with open(os.path.dirname(__file__) + "/grid.txt" ) as f:
        l = []  # noqa: E741
        for _ in range(2_0 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(2_0 ):
        for j in range(1_7 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(1_7 ):
        for j in range(2_0 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(1_7 ):
        for j in range(1_7 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(1_7 ):
        for j in range(3 , 2_0 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
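# Worked example for one "right" product (illustrative numbers, not taken from grid.txt):
# for the run 26, 38, 40, 67 the product is 26 * 38 * 40 * 67 = 2_647_840.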
if __name__ == "__main__":
print(solution())
| 568 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments ( parser ):
    """simple docstring"""
    group = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=lowerCamelCase_ , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=lowerCamelCase_ , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=lowerCamelCase_ , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=lowerCamelCase_ , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=lowerCamelCase_ , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=lowerCamelCase_ , type=lowerCamelCase_ , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=lowerCamelCase_ , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def set_default_quantizers ( args ):
    """simple docstring"""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator" )
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
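# Minimal sketch of the two weight-scaling modes selected above (assumption:
# pytorch-quantization is installed; the descriptor API is the one imported at the top):
#
#   per_tensor  = QuantDescriptor(num_bits=8, axis=None)   # one scale for the whole tensor
#   per_channel = QuantDescriptor(num_bits=8, axis=(0,))   # one scale per output channel
#   quant_nn.QuantLinear.set_default_quant_desc_weight(per_channel)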
def configure_model ( model , args , calib=False , eval=False ):
    """simple docstring"""
    logger.info("Configuring Model for Quantization" )
    logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ["embeddings"] , which="weight" , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [""] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R"layer.\d+." + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R"layer.\d+." + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def enable_calibration ( model ):
"""simple docstring"""
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'''{name:80}: {module}''' )
def finish_calibration ( model , args ):
"""simple docstring"""
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase_ )
def fuse_qkv ( model , args ):
    """simple docstring"""
    def fusea(qq , qk , qv ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , "_amax" ):
                print("          WARNING: NO AMAX BUFFER" )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f'''          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu ( model , maxval ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def expand_amax ( model ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights ( model ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , "_weight_quantizer" ):
            if not hasattr(mod._weight_quantizer , "_amax" ):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def print_model_summary ( model , name_width=2_5 , line_width=1_8_0 , ignore=None ):
    """simple docstring"""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , "weight" ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , "_input_quantizer" , None )
        weight_q = getattr(mod , "_weight_quantizer" , None )
        if not hasattr(mod , "weight" ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f'''Act:{input_q.extra_repr()}'''
        wgt_str = f'''Wgt:{weight_q.extra_repr()}'''
        line = f'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(line ) <= line_width:
            logger.info(line )
        else:
            logger.info(f'''{name:{name_width}} {act_str}''' )
            logger.info(f'''{' ':{name_width}} {wgt_str}''' )
def print_quant_summary ( model ):
    """simple docstring"""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def set_quantizer ( name , mod , quantizer , k , v ):
    """simple docstring"""
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f'''{name} has no {quantizer}''' )
def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="both" , **lowerCamelCase_ ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(lowerCamelCase_ , lowerCamelCase_ , "_input_quantizer" , lowerCamelCase_ , lowerCamelCase_ )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase_ , lowerCamelCase_ , "_weight_quantizer" , lowerCamelCase_ , lowerCamelCase_ )
logger.info(lowerCamelCase_ )
def set_quantizer_by_name ( model , names , **kwargs ):
    """simple docstring"""
    for name, mod in model.named_modules():
        if hasattr(mod , "_input_quantizer" ) or hasattr(mod , "_weight_quantizer" ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith("_quantizer" ):
            for n in names:
                if re.search(n , name ):
                    s = f'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += f''' {k}={v}'''
                        setattr(mod , k , v )
                    logger.info(s )
| 568 | 1 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch ( tf_checkpoint_path : str , config_path : str , pytorch_dump_path : str ):
    '''simple docstring'''
    def get_masked_lm_array(name : str ):
        full_name = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_array(name : str ):
        full_name = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_layer_array(layer_index : int , name : str ):
        full_name = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_attention_layer_array(layer_index : int , name : str , original_shape : Any ):
        full_name = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        array = array.reshape(original_shape )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    print(f"""Loading model based on config from {config_path}...""" )
    config = BertConfig.from_json_file(config_path )
    model = BertForMaskedLM(config )
    # Layers
    for layer_index in range(0 , config.num_hidden_layers ):
        layer = model.bert.encoder.layer[layer_index]
        # Self-attention
        self_attn = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
        # Self-attention Output
        self_output = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , '''_attention_layer_norm/gamma''' )
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , '''_attention_layer_norm/beta''' )
        # Intermediate
        intermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index , '''_intermediate_dense/kernel''' )
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index , '''_intermediate_dense/bias''' )
        # Output
        bert_output = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index , '''_output_dense/kernel''' )
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index , '''_output_dense/bias''' )
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index , '''_output_layer_norm/gamma''' )
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index , '''_output_layer_norm/beta''' )
    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array('''_position_embedding_layer/embeddings''' )
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array('''_type_embedding_layer/embeddings''' )
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array('''_embedding_norm_layer/gamma''' )
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array('''_embedding_norm_layer/beta''' )
    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array('''dense/kernel''' )
    lm_head.dense.bias.data = get_masked_lm_array('''dense/bias''' )
    lm_head.LayerNorm.weight.data = get_masked_lm_array('''layer_norm/gamma''' )
    lm_head.LayerNorm.bias.data = get_masked_lm_array('''layer_norm/beta''' )
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array('''embedding_table''' )
    # Pooling
    model.bert.pooler = BertPooler(config=config )
    model.bert.pooler.dense.weight.data = get_encoder_array('''_pooler_layer/kernel''' )
    model.bert.pooler.dense.bias.data = get_encoder_array('''_pooler_layer/bias''' )
    # Export final model
    model.save_pretrained(pytorch_dump_path )
    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path )
    print(new_model.eval() )
    print('''Model conversion was done successfully!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
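# Invocation sketch (assumptions: the paths are placeholders and the script name is
# whatever this file is saved as):
#
#   python convert_checkpoint.py \
#       --tf_checkpoint_path /path/to/token_dropping_ckpt \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/output_dir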
| 210 |
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data ( olid : str = "isbn/0140328726" ):
    '''simple docstring'''
    new_olid = olid.strip().strip('''/''' )  # Remove leading/trailing whitespace & slashes
    if new_olid.count('''/''' ) != 1:
        msg = f"""{olid} is not a valid Open Library olid"""
        raise ValueError(msg )
return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json()
def summarize_book ( ol_book_data : dict ):
    '''simple docstring'''
    desired_keys = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['''Authors'''] = [
        get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
    ]
    data['''First sentence'''] = data['''First sentence''']['''value''']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ''', '''.join(value )
    return data
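# Example of the olid normalization in get_openlibrary_data above:
#   " /isbn/0140328726/ ".strip().strip("/")  ->  "isbn/0140328726"  (one "/", accepted)
#   "isbn//0140328726" contains two "/" and raises ValueError.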
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
            book_summary = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print("""\n""".join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 210 | 1 |
'''simple docstring'''
lowerCamelCase = 0 # The first color of the flag.
lowerCamelCase = 1 # The second color of the flag.
lowerCamelCase = 2 # The third color of the flag.
lowerCamelCase = (red, white, blue)
def dutch_national_flag_sort ( sequence ):
    '''simple docstring'''
    if not sequence:
        return []
    if len(sequence ) == 1:
        return list(sequence )
    low = 0
    high = len(sequence ) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f'The elements inside the sequence must contain only {colors} values'
            raise ValueError(msg )
    return sequence
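# Example (the colors are 0, 1 and 2, as defined at the top of the file):
#   dutch_national_flag_sort([2, 0, 1, 2, 0])  ->  [0, 0, 1, 2, 2]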
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by commas:\n''').strip()
    unsorted = [int(item.strip()) for item in user_input.split(''',''')]
print(F"{dutch_national_flag_sort(unsorted)}") | 710 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
class SchedulerType( Enum ):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule ( optimizer , last_epoch = -1 ):
    '''simple docstring'''
    return LambdaLR(optimizer , lambda current_step : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup ( optimizer , num_warmup_steps , last_epoch = -1 ):
    '''simple docstring'''
    def lr_lambda( current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
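# Worked example for lr_lambda above: with num_warmup_steps=4 the multiplier is
# 0/4, 1/4, 2/4, 3/4 at steps 0..3, then stays at 1.0 from step 4 onward.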
def get_piecewise_constant_schedule ( optimizer , step_rules , last_epoch = -1 ):
    '''simple docstring'''
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        value_str , value = rule_str.split(""":""" )
        steps = int(value_str )
        value = float(value )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function( rules_dict , last_lr_multiple ):
        def rule_func( steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    '''simple docstring'''
    def lr_lambda( current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    '''simple docstring'''
    def lr_lambda( current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    '''simple docstring'''
    def lr_lambda( current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup ( optimizer , num_warmup_steps , num_training_steps , lr_end=1E-7 , power=1.0 , last_epoch=-1 ):
    '''simple docstring'''
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
    def lr_lambda( current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
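# Usage sketch for the dispatch table above and `get_scheduler` below
# (assumption: torch is installed):
#
#   import torch
#   opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
#   sched = get_scheduler("linear", opt, num_warmup_steps=10, num_training_steps=100)
#   for _ in range(100):
#       opt.step()
#       sched.step()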
def get_scheduler ( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    '''simple docstring'''
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch ) | 686 | 0 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex :
    def __init__( self : Optional[int] , id_ ) -> Any:
        '''simple docstring'''
        self.id = str(id_ )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {} # {vertex:distance}
    def __lt__( self : List[str] , other ) -> Any:
        '''simple docstring'''
        return self.key < other.key
    def __repr__( self : Optional[Any] ) -> List[Any]:
        '''simple docstring'''
        return self.id
    def add_neighbor ( self : str , vertex ) -> List[Any]:
        '''simple docstring'''
        self.neighbors.append(vertex )
    def add_edge ( self : Optional[int] , vertex , weight ) -> List[str]:
        '''simple docstring'''
        self.edges[vertex.id] = weight
def connect ( graph : List[Any] , a : Any , b : List[str] , edge : List[Any] ) -> int:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )
def prim ( graph : list , root : Vertex ) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap ( graph : list , root : Vertex ) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def a_ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
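# Usage sketch for the two traversals above:
#
#   g = [Vertex(i) for i in range(1, 4)]
#   connect(g, 1, 2, 5)
#   connect(g, 2, 3, 2)
#   print(prim(g, g[0]))             # MST edges as (vertex, parent) id pairs
#   print(list(prim_heap(g, g[0])))  # the same tree via the heap variant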
| 286 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput ( BaseOutput ):
    sample : jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel ( nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size : int = 32
    in_channels : int = 4
    out_channels : int = 4
    down_block_types : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention : Union[bool, Tuple[bool]] = False
    block_out_channels : Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block : int = 2
    attention_head_dim : Union[int, Tuple[int]] = 8
    num_attention_heads : Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim : int = 1280
    dropout : float = 0.0
    use_linear_projection : bool = False
    dtype : jnp.dtype = jnp.float32
    flip_sin_to_cos : bool = True
    freq_shift : int = 0
    use_memory_efficient_attention : bool = False
    def init_weights ( self , rng ):
        """simple docstring"""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
    def setup ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = self.block_out_channels
__SCREAMING_SNAKE_CASE: Union[str, Any] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__SCREAMING_SNAKE_CASE: Any = self.num_attention_heads or self.attention_head_dim
# input
__SCREAMING_SNAKE_CASE: str = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__SCREAMING_SNAKE_CASE: int = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__SCREAMING_SNAKE_CASE: Union[str, Any] = FlaxTimestepEmbedding(_lowerCAmelCase , dtype=self.dtype )
__SCREAMING_SNAKE_CASE: Optional[int] = self.only_cross_attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Union[str, Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Any = (num_attention_heads,) * len(self.down_block_types )
# down
__SCREAMING_SNAKE_CASE: Union[str, Any] = []
__SCREAMING_SNAKE_CASE: List[str] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__SCREAMING_SNAKE_CASE: List[str] = output_channel
__SCREAMING_SNAKE_CASE: str = block_out_channels[i]
__SCREAMING_SNAKE_CASE: Any = i == len(_lowerCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__SCREAMING_SNAKE_CASE: str = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__SCREAMING_SNAKE_CASE: Tuple = FlaxDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: str = down_blocks
# mid
__SCREAMING_SNAKE_CASE: Union[str, Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__SCREAMING_SNAKE_CASE: Optional[int] = []
__SCREAMING_SNAKE_CASE: Tuple = list(reversed(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Optional[int] = list(reversed(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: str = list(reversed(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Optional[int] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__SCREAMING_SNAKE_CASE: int = output_channel
__SCREAMING_SNAKE_CASE: List[str] = reversed_block_out_channels[i]
__SCREAMING_SNAKE_CASE: List[str] = reversed_block_out_channels[min(i + 1 , len(_lowerCAmelCase ) - 1 )]
__SCREAMING_SNAKE_CASE: Union[str, Any] = i == len(_lowerCAmelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__SCREAMING_SNAKE_CASE: Optional[int] = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , prev_output_channel=_lowerCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__SCREAMING_SNAKE_CASE: int = FlaxUpBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , prev_output_channel=_lowerCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = output_channel
__SCREAMING_SNAKE_CASE: Union[str, Any] = up_blocks
# out
__SCREAMING_SNAKE_CASE: Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__SCREAMING_SNAKE_CASE: Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ):
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlockaD):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNetaDConditionOutput(sample=sample)
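# Note on the skip-connection bookkeeping in step 5 above (illustrative comment,
# not part of the original file): every down block pushes `layers_per_block`
# residuals plus one downsampling residual onto `down_block_res_samples`, and each
# up block consumes `layers_per_block + 1` of them from the end. For example, with
# layers_per_block=2 and 4 down blocks, the tuple holds 1 + 4 * 2 + 3 = 12 entries,
# and the four up blocks slice off 3 residuals each, in reverse order.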
| 202 | 0 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
IMAGENET_64_UNET_CONFIG = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
LSUN_256_UNET_CONFIG = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
CD_SCHEDULER_CONFIG = {
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'num_train_timesteps': 151,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
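# Usage sketch (illustrative; the script filename and checkpoint path are
# assumptions): the CLI below turns an OpenAI consistency-model checkpoint into a
# diffusers ConsistencyModelPipeline, e.g.
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path /path/to/cd_imagenet64_l2.pt \
#       --dump_path ./converted-consistency-model \
#       --class_cond True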
if __name__ == "__main__":
__UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
__UpperCamelCase : str = parser.parse_args()
__UpperCamelCase : Optional[Any] = strabool(args.class_cond)
__UpperCamelCase : Optional[Any] = os.path.basename(args.unet_path)
print(F'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
__UpperCamelCase : Dict = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__UpperCamelCase : Optional[Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__UpperCamelCase : Optional[Any] = TEST_UNET_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
__UpperCamelCase : List[Any] = None
__UpperCamelCase : Optional[Any] = con_pt_to_diffuser(args.unet_path, unet_config)
__UpperCamelCase : Optional[int] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__UpperCamelCase : Optional[Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__UpperCamelCase : List[str] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__UpperCamelCase : Optional[Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
__UpperCamelCase : List[str] = CMStochasticIterativeScheduler(**scheduler_config)
__UpperCamelCase : List[Any] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path) | 641 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
'''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
return self.image_processor | 641 | 1 |
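# Usage sketch for the LayoutXLMProcessor defined above (illustrative; the input
# image path is an assumption). With apply_ocr=True the image processor runs OCR
# and the processor tokenizes the recognized words together with their boxes:
#
#   from PIL import Image
#   image = Image.open("document.png").convert("RGB")
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")
#   # encoding now holds input_ids, bbox, attention_mask and image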
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def num_embed(self):
return 1_2
@property
    def num_embeds_ada_norm(self):
return 1_2
@property
    def text_embedder_hidden_size(self):
return 3_2
@property
    def dummy_vqvae(self):
torch.manual_seed(0 )
snake_case__ : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)
@property
    def dummy_transformer(self):
torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
return model
    def test_vq_diffusion(self):
snake_case__ : Optional[int] = """cpu"""
snake_case__ : int = self.dummy_vqvae
snake_case__ : Union[str, Any] = self.dummy_text_encoder
snake_case__ : str = self.dummy_tokenizer
snake_case__ : List[Any] = self.dummy_transformer
snake_case__ : Union[str, Any] = VQDiffusionScheduler(self.num_embed )
snake_case__ : Tuple = LearnedClassifierFreeSamplingEmbeddings(learnable=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = VQDiffusionPipeline(
vqvae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , transformer=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , learned_classifier_free_sampling_embeddings=__SCREAMING_SNAKE_CASE , )
snake_case__ : List[str] = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : Any = """teddy bear playing in the pool"""
snake_case__ : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : Union[str, Any] = pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""np""" )
snake_case__ : List[Any] = output.images
snake_case__ : str = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : Optional[int] = pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , return_dict=__SCREAMING_SNAKE_CASE , num_inference_steps=2 )[0]
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
snake_case__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
snake_case__ : List[Any] = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
snake_case__ : int = """cpu"""
snake_case__ : Optional[Any] = self.dummy_vqvae
snake_case__ : Optional[int] = self.dummy_text_encoder
snake_case__ : List[Any] = self.dummy_tokenizer
snake_case__ : List[str] = self.dummy_transformer
snake_case__ : Tuple = VQDiffusionScheduler(self.num_embed )
snake_case__ : Dict = LearnedClassifierFreeSamplingEmbeddings(
learnable=__SCREAMING_SNAKE_CASE , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case__ : Union[str, Any] = VQDiffusionPipeline(
vqvae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , transformer=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , learned_classifier_free_sampling_embeddings=__SCREAMING_SNAKE_CASE , )
snake_case__ : Any = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = """teddy bear playing in the pool"""
snake_case__ : Optional[Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : Optional[Any] = pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""np""" )
snake_case__ : int = output.images
snake_case__ : int = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : Any = pipe(
[prompt] , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , return_dict=__SCREAMING_SNAKE_CASE , num_inference_steps=2 )[0]
snake_case__ : Optional[Any] = image[0, -3:, -3:, -1]
snake_case__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
snake_case__ : Optional[Any] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
snake_case__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
snake_case__ : Dict = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
snake_case__ : int = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case__ : List[str] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(0 )
snake_case__ : List[str] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=__SCREAMING_SNAKE_CASE , output_type="""np""" , )
snake_case__ : Any = output.images[0]
        assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 38 |
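# Minimal inference sketch for the pipeline exercised by the tests above
# (illustrative; assumes a CUDA device and the public microsoft/vq-diffusion-ithq
# checkpoint):
#
#   import torch
#   from diffusers import VQDiffusionPipeline
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq").to("cuda")
#   generator = torch.Generator("cuda").manual_seed(0)
#   image = pipe("teddy bear playing in the pool", generator=generator).images[0]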
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('Model not supported')

    repo_id = 'huggingface/label-files'
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = 'speech-commands-v2-id2label.json'
    else:
        config.num_labels = 527
        filename = 'audioset-id2label.json'

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace('module.v', 'audio_spectrogram_transformer')
    if "cls_token" in name:
        name = name.replace('cls_token', 'embeddings.cls_token')
    if "dist_token" in name:
        name = name.replace('dist_token', 'embeddings.distillation_token')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    # transformer blocks
    if "blocks" in name:
        name = name.replace('blocks', 'encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('audio_spectrogram_transformer.norm', 'audio_spectrogram_transformer.layernorm')
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('module.mlp_head.0', 'classifier.layernorm')
    if "module.mlp_head.1" in name:
        name = name.replace('module.mlp_head.1', 'classifier.dense')
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias'
                ] = val[:dim]
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias'
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias'
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        'module.v.head.weight',
        'module.v.head.bias',
        'module.v.head_dist.weight',
        'module.v.head_dist.bias',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    '''
    Copy/paste/tweak model's weights to our Audio Spectrogram Transformer structure.
    '''
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        'ast-finetuned-audioset-10-10-0.4593': (
            'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
        ),
        'ast-finetuned-audioset-10-10-0.450': (
            'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
        ),
        'ast-finetuned-audioset-10-10-0.448': (
            'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
        ),
        'ast-finetuned-audioset-10-10-0.448-v2': (
            'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
        ),
        'ast-finetuned-audioset-12-12-0.447': (
            'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
        ),
        'ast-finetuned-audioset-14-14-0.443': (
            'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
        ),
        'ast-finetuned-audioset-16-16-0.442': (
            'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
        ),
        'ast-finetuned-speech-commands-v2': (
            'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if 'speech-commands' not in model_name else -6.845978
    std = 4.5689974 if 'speech-commands' not in model_name else 5.5654526
    max_length = 1024 if 'speech-commands' not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset('speech_commands', 'v0.02', split='validation')
        waveform = dataset[0]['audio']['array']
    else:
        filepath = hf_hub_download(
            repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset',
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors='pt')

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError('Unknown model name')
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError('Logits don\'t match')
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'Saving feature extractor to {pytorch_dump_folder_path}')
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing model and feature extractor to the hub...')
        model.push_to_hub(f'MIT/{model_name}')
        feature_extractor.push_to_hub(f'MIT/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
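# Example invocation (illustrative; the script filename is an assumption):
#
#   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted \
#       --push_to_hub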
| 472 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
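# With the lazy module in place, `from transformers import RoCBertModel` only
# imports `modeling_roc_bert` on first attribute access. A rough sketch of the
# idea (simplified; this is not the actual _LazyModule implementation):
#
#   class _LazyModule(ModuleType):
#       def __getattr__(self, name):
#           submodule = self._class_to_module[name]  # hypothetical lookup table
#           module = importlib.import_module("." + submodule, self.__name__)
#           return getattr(module, name)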
| 700 |
def _lowerCAmelCase ( _lowerCAmelCase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
__snake_case = set()
# Replace all the whitespace in our sentence
__snake_case = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_lowerCAmelCase ) == 26
def _lowerCAmelCase ( _lowerCAmelCase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
__snake_case = [False] * 26
for char in input_str:
if char.islower():
__snake_case = True
elif char.isupper():
__snake_case = True
return all(_lowerCAmelCase )
def _lowerCAmelCase ( _lowerCAmelCase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def _lowerCAmelCase ( ) -> None:
'''simple docstring'''
from timeit import timeit
__snake_case = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=_lowerCAmelCase ) )
print(timeit("is_pangram_faster()" , setup=_lowerCAmelCase ) )
print(timeit("is_pangram_fastest()" , setup=_lowerCAmelCase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
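    # Quick sanity checks (illustrative additions, not part of the original module):
    assert is_pangram("The quick brown fox jumps over the lazy dog")
    assert is_pangram_faster("Pack my box with five dozen liquor jugs")
    assert not is_pangram_fastest("Hello world")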
| 473 | 0 |
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
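# For the sample graph above the program prints 2, 3 and 5: removing vertex 2
# separates {0, 1} from the rest, removing 3 isolates vertex 4, and removing 5
# disconnects the cycle {6, 7, 8}.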
| 487 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # prepare a small batch of dummy PIL images
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : int = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
    def test_save_load_pretrained_additional_features(self):
__SCREAMING_SNAKE_CASE : str = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 )
__SCREAMING_SNAKE_CASE : int = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
    def test_image_processor(self):
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = VisionTextDualEncoderProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Dict = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = image_processor(_lowerCamelCase , return_tensors='''np''' )
__SCREAMING_SNAKE_CASE : Tuple = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self):
__SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = VisionTextDualEncoderProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''lower newer'''
__SCREAMING_SNAKE_CASE : Optional[int] = processor(text=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Any = tokenizer(_lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self):
__SCREAMING_SNAKE_CASE : str = self.get_image_processor()
__SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : int = VisionTextDualEncoderProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Any = '''lower newer'''
__SCREAMING_SNAKE_CASE : int = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : int = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_lowerCamelCase ):
processor()
    def test_tokenizer_decode(self):
__SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE : Tuple = processor.batch_decode(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
    def test_model_input_names(self):
__SCREAMING_SNAKE_CASE : str = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = '''lower newer'''
__SCREAMING_SNAKE_CASE : Dict = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : str = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 674 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {'vocab_file': 'vocab.txt'}
__lowerCAmelCase : Optional[Any] = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
__lowerCAmelCase : List[str] = {
'openbmb/cpm-ant-10b': 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))

        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
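# Usage sketch (illustrative; requires the `jieba` package and downloads the
# public openbmb/cpm-ant-10b vocabulary):
#
#   from transformers import CpmAntTokenizer
#
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   ids = tokenizer("今天天气真好!")["input_ids"]
#   text = tokenizer.decode(ids)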
| 76 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 76 | 1 |
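# Instantiation sketch for the CvtConfig above (illustrative): the per-stage list
# arguments must all have the same length, one entry per stage.
#
#   config = CvtConfig(embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10])
#   assert len(config.embed_dim) == len(config.depth) == 3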
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
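# to_atuple normalizes a scalar image/patch size to an (h, w) pair, e.g.
# to_atuple(224) -> (224, 224). The attention-shape checks below rely on the usual
# ViT sequence length: (image_h // patch_h) * (image_w // patch_w) + 1 for the
# [CLS] token, e.g. 224x224 images with 16x16 patches give 14 * 14 + 1 = 197.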
@require_flax
class VisionTextDualEncoderMixin:
"""simple docstring"""
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)
    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    """simple docstring"""

    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    """simple docstring"""

    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 67 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case__ ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
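# A minimal usage sketch (assumes a CLIP checkpoint is available, e.g.
# "openai/clip-vit-base-patch32" on the Hub):
# processor = snake_case__.from_pretrained("openai/clip-vit-base-patch32")
# batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# `batch` then holds input_ids/attention_mask from the tokenizer plus
# pixel_values from the image processor.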
| 286 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 25_0004
RO_CODE = 25_0020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                #                                        ^ unk: 2 + 1 = 3                  unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        '''simple docstring'''
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        '''simple docstring'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 25_0001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 25_0004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 25_0020)
    def test_enro_tokenizer_batch_encode_plus(self):
        '''simple docstring'''
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        '''simple docstring'''
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        '''simple docstring'''
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], RO_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        '''simple docstring'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [25_0026, 25_0001])
    def test_special_tokens_unaffacted_by_save_load(self):
        '''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        '''simple docstring'''
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        '''simple docstring'''
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        '''simple docstring'''
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        '''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 25_0004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 25_0001,
            },
        )
| 504 | 0 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond: n rows of left-padded stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond: n rows of shrinking stars."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond: the upper and lower halves combined."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r'| /\ | |- | |- |--| |\ /| |-')
print(r'|/ \| |- |_ |_ |__| | \/ | |_')
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print('Good Bye...')
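# Example: pretty_print(3) prints the upper pyramid followed by its mirror image:
#
#   *
#  * *
# * * *
# * * *
#  * *
#   *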
| 220 |
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter that processes one float sample at a time."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)
        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # shift the delay lines and store the newest sample/output at index 0
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
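# A minimal usage sketch (the coefficients are illustrative second-order
# low-pass values, not a filter designed here): stream samples one at a time.
#
# lowpass = IIRFilter(2)
# lowpass.set_coefficients([1.0, -1.1430, 0.4128], [0.0675, 0.1349, 0.0675])
# filtered = [lowpass.process(x) for x in (0.0, 1.0, 0.5, -0.25)]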
| 220 | 1 |
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
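# Worked example: for 3 nodes, catalan_number(3) = C(6, 3) // 4 = 20 // 4 = 5
# binary search trees, and binary_tree_count(3) = 5 * 3! = 30 binary trees.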
| 689 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ctrl""": 2_5_6,
}
CONTROL_CODES = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
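# e.g. get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}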
class UpperCamelCase__( PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        """simple docstring"""
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.encoder)

    def get_vocab(self):
        """simple docstring"""
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """simple docstring"""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
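# A minimal walk-through (hypothetical merge table): if bpe_ranks only contained
# {("l", "o"): 0}, then bpe("low") would rewrite ("l", "o", "w</w>") to
# ("lo", "w</w>") and return "lo@@ w" -- "@@ " marks a non-final BPE piece.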
| 689 | 1 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class __a ( PretrainedConfig ):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
    def __init__(
        self,
        fpn_feature_size=2_56,
        mask_feature_size=2_56,
        no_object_weight=0.1,
        use_auxiliary_loss=False,
        backbone_config=None,
        decoder_config=None,
        init_std=0.02,
        init_xavier_std=1.0,
        dice_weight=1.0,
        cross_entropy_weight=1.0,
        mask_weight=20.0,
        output_auxiliary_logits=None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=3_84,
                in_channels=3,
                patch_size=4,
                embed_dim=1_28,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config, decoder_config, **kwargs):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
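# A minimal round-trip sketch: the default config builds a Swin backbone and a
# DETR decoder locally (no download), and to_dict() re-serializes both nested
# configs.
# config = __a()
# assert config.to_dict()["model_type"] == "maskformer"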
| 650 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
A_ : int =logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
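# Example invocation (paths are illustrative):
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model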
| 650 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1_280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.24_45, -1.19_93, 0.19_05]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
                [[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
                [[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 718 |
def is_sum_subset(arr: list, required_sum: int) -> bool:
    # subset[i][j] is True if some subset of the first i elements sums to j
    arr_len = len(arr)

    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
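# Example: is_sum_subset([2, 4, 6, 8], 5) -> False (all elements are even),
# while is_sum_subset([2, 4, 6, 8], 14) -> True (2 + 4 + 8 == 14).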
| 311 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
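# Illustration (not part of the original file): with this lazy __init__ in place,
# importing the lightweight config/processor symbols stays cheap, and the
# torch-backed modeling code is only imported on first attribute access, e.g.
#
#   from transformers.models.clipseg import CLIPSegProcessor            # cheap
#   from transformers.models.clipseg import CLIPSegForImageSegmentation # triggers the torch import
#
# (the `transformers.models.clipseg` module path is assumed from the relative imports above)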
| 395 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE : Tuple = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
SCREAMING_SNAKE_CASE : Tuple = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
SCREAMING_SNAKE_CASE : Optional[int] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False, ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
        # transpose: one sub-list per prediction -> one list per reference slot
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
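
if __name__ == "__main__":
    # Toy illustration (not part of the original metric) of the reference
    # transposition performed in `_compute` above: references arrive as one
    # sub-list per prediction, while sacrebleu wants one list per reference slot.
    references = [["ref a1", "ref a2"], ["ref b1", "ref b2"]]
    references_per_prediction = len(references[0])
    print([[refs[i] for refs in references] for i in range(references_per_prediction)])
    # -> [['ref a1', 'ref b1'], ['ref a2', 'ref b2']]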
| 635 | 0 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each query token as a possible entity start/end, given the supports."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        # embed the queries and the stacked support examples
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            # embeddings of the entity-start / entity-end marker tokens in this query's supports
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            # similarity of every query token to the support markers -> per-token probabilities
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
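
# Expected input layout for `forward` above, as a sketch (the helper that builds
# these dicts lives in the upstream fsner tokenizer utilities and is not shown
# here, so the field shapes below are inferred from the code, not copied from it):
#
#   W_query    = {"input_ids": LongTensor[num_queries, seq_len], "attention_mask": ...}
#   W_supports = {"input_ids": LongTensor[num_supports, seq_len], "attention_mask": ...,
#                 "sizes": torch.tensor([...]),          # number of supports per query
#                 "start_token_id": torch.tensor(id),    # id of the entity-start marker token
#                 "end_token_id": torch.tensor(id)}      # id of the entity-end marker token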
| 514 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _snake_case :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 * 8 , _SCREAMING_SNAKE_CASE=32 * 8 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=64 , ):
'''simple docstring'''
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = is_training
lowerCAmelCase = use_auxiliary_loss
lowerCAmelCase = num_queries
lowerCAmelCase = num_channels
lowerCAmelCase = min_size
lowerCAmelCase = max_size
lowerCAmelCase = num_labels
lowerCAmelCase = hidden_dim
lowerCAmelCase = hidden_dim
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_SCREAMING_SNAKE_CASE ) > 0.5
).float()
lowerCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=_SCREAMING_SNAKE_CASE ) > 0.5).long()
lowerCAmelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowerCAmelCase = self.num_queries
lowerCAmelCase = self.num_labels
lowerCAmelCase = [1, 1, 1, 1]
lowerCAmelCase = self.num_channels
lowerCAmelCase = 64
lowerCAmelCase = 1_28
lowerCAmelCase = self.hidden_dim
lowerCAmelCase = self.hidden_dim
lowerCAmelCase = self.hidden_dim
return config
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = output.encoder_hidden_states
lowerCAmelCase = output.pixel_decoder_hidden_states
lowerCAmelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_SCREAMING_SNAKE_CASE ) , config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
with torch.no_grad():
lowerCAmelCase = MaskaFormerModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = model(pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = MaskaFormerForUniversalSegmentation(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
def comm_check_on_output(_SCREAMING_SNAKE_CASE ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase = model(pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE )
comm_check_on_output(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(
pixel_values=_SCREAMING_SNAKE_CASE , pixel_mask=_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE )
comm_check_on_output(_SCREAMING_SNAKE_CASE )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _snake_case ( a_ , a_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE : Tuple = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : int = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = MaskaFormerModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_SCREAMING_SNAKE_CASE )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@slow
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowerCAmelCase = MaskaFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = (self.model_tester.min_size,) * 2
lowerCAmelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=_SCREAMING_SNAKE_CASE ),
'mask_labels': torch.randn((2, 10, *size) , device=_SCREAMING_SNAKE_CASE ),
'class_labels': torch.zeros(2 , 10 , device=_SCREAMING_SNAKE_CASE ).long(),
}
lowerCAmelCase = self.model_tester.get_config()
lowerCAmelCase = MaskaFormerForUniversalSegmentation(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowerCAmelCase = self.all_model_classes[1]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.all_model_classes[1]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = model_class(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
model.train()
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , mask_labels=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowerCAmelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_UpperCamelCase : Union[str, Any] = 1e-4
def snake_case ( ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class _snake_case ( unittest.TestCase ):
@cached_property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 3_84, 3_84) )
with torch.no_grad():
lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_SCREAMING_SNAKE_CASE ).eval()
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_SCREAMING_SNAKE_CASE , (1, 3, 3_84, 3_84) )
with torch.no_grad():
lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
# masks_queries_logits
lowerCAmelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowerCAmelCase = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
lowerCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
# class_queries_logits
lowerCAmelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_SCREAMING_SNAKE_CASE ).eval()
lowerCAmelCase = self.default_image_processor
lowerCAmelCase = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='pt' , )
lowerCAmelCase = inputs['pixel_values'].to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = [el.to(_SCREAMING_SNAKE_CASE ) for el in inputs['mask_labels']]
lowerCAmelCase = [el.to(_SCREAMING_SNAKE_CASE ) for el in inputs['class_labels']]
with torch.no_grad():
lowerCAmelCase = model(**_SCREAMING_SNAKE_CASE )
self.assertTrue(outputs.loss is not None )
| 514 | 1 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
    ]
    functions_shuffled = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('generating dataset')
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32')), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={'list': (100,)}, )
        print('first set of iterations')
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print('shuffling dataset')
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling')
        for func, kwargs in functions_shuffled:
            print('shuffled ', func.__name__, str(kwargs))
            times['shuffled ' + func.__name__ + ' ' + ' '.join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)
    with open(RESULTS_FILE_PATH, 'wb') as f:
        f.write(json.dumps(times).encode('utf-8'))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
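
# For reference: `get_duration` is imported from the local `utils` module and is
# not shown above. A minimal sketch of such a decorator (an assumption -- the
# real helper may differ) would be:
#
#   import functools, time
#
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           starttime = time.time()
#           func(*args, **kwargs)
#           return time.time() - starttime   # the timings are what gets JSON-dumped above
#       return wrapper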
| 562 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'moussaKam/mbarthez': 1024,
    'moussaKam/barthez': 1024,
    'moussaKam/barthez-orangesum-title': 1024,
}

SPIECE_UNDERLINE = '▁'
class BarthezTokenizer(PreTrainedTokenizer):
    """
    BARThez tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        self.fairseq_offset = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
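
# Illustrative round trip with the tokenizer above (shown as comments because it
# downloads the sentencepiece model from one of the checkpoints listed in
# PRETRAINED_VOCAB_FILES_MAP):
#
#   tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   ids = tok("Bonjour le monde !").input_ids
#   print(tok.convert_ids_to_tokens(ids))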
| 562 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]], dtype=tf.int32, )  # J'aime le camembert !"
        output = model(input_ids)['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32, )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 706 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a CLAP feature extractor that turns raw audio into (optionally fused) log-mel spectrograms.
    """

    model_input_names = ['input_features', 'is_longer']

    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1_024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs, ) -> None:
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale='htk', )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', )

    def to_dict(self) -> Dict[str, Any]:
        # drop the large mel filter banks when serializing
        output = copy.deepcopy(self.__dict__)
        output['feature_extractor_type'] = self.__class__.__name__
        if 'mel_filters' in output:
            del output['mel_filters']
        if 'mel_filters_slaney' in output:
            del output['mel_filters_slaney']
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters=None) -> np.ndarray:
        # compute the log-mel spectrogram of the provided waveform
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, 'hann'), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel='dB', )
        return log_mel_spectrogram.T
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode='bilinear', align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"""data_truncating {truncation} not implemented""")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='constant', constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(self, raw_speech, truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float32) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'input_features': input_mel, 'is_longer': is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
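
# Illustrative call pattern for the feature extractor above (shown as comments
# because it needs a configured instance; `extractor` stands for such an instance):
#
#   audio = np.random.randn(48_000).astype(np.float32)   # 1 second at 48 kHz
#   feats = extractor(audio, sampling_rate=48_000, return_tensors="pt")
#   feats["input_features"]   # 4 stacked mel views per clip when truncation="fusion"
#   feats["is_longer"]        # one [bool] per clip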
| 167 | 0 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""
        if os.getenv('''HF_ALLOW_CODE_EVAL''', 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError('''This metric is currently not supported on Windows.''')
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + '''\n''' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result['''completion_id'''], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['''passed'''] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {F'''pass@{k}''': estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
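

if __name__ == "__main__":
    # Worked example for the estimator above (illustrative numbers): with n=5
    # generated samples per task of which c=2 pass, pass@1 = c/n = 0.4, while
    # pass@5 = 1.0 because at least one of the five samples passes.
    print(estimate_pass_at_k([5, 5], [2, 2], 1))  # -> [0.4 0.4]
    print(estimate_pass_at_k(5, [2], 5))          # -> [1.]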
| 24 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowercase( nn.Module ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = 0.0
lowercase__ = 1
lowercase__ = 1
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = jnp.floataa
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : int = []
_snake_case : Tuple = []
for i in range(self.num_layers ):
_snake_case : str = self.in_channels if i == 0 else self.out_channels
_snake_case : Tuple = FlaxResnetBlockaD(
in_channels=a_, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(a_ )
_snake_case : str = FlaxTransformeraDModel(
in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
attentions.append(a_ )
_snake_case : Optional[int] = resnets
_snake_case : Tuple = attentions
if self.add_downsample:
_snake_case : Optional[int] = FlaxDownsampleaD(self.out_channels, dtype=self.dtype )
def __call__( self: Dict, a_: Any, a_: List[str], a_: Optional[Any], a_: List[str]=True ):
'''simple docstring'''
_snake_case : List[str] = ()
for resnet, attn in zip(self.resnets, self.attentions ):
_snake_case : Optional[Any] = resnet(a_, a_, deterministic=a_ )
_snake_case : str = attn(a_, a_, deterministic=a_ )
output_states += (hidden_states,)
if self.add_downsample:
_snake_case : Optional[Any] = self.downsamplers_a(a_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowercase( nn.Module ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = 0.0
lowercase__ = 1
lowercase__ = True
lowercase__ = jnp.floataa
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Any = []
for i in range(self.num_layers ):
_snake_case : str = self.in_channels if i == 0 else self.out_channels
_snake_case : int = FlaxResnetBlockaD(
in_channels=a_, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(a_ )
_snake_case : Optional[Any] = resnets
if self.add_downsample:
_snake_case : Tuple = FlaxDownsampleaD(self.out_channels, dtype=self.dtype )
def __call__( self: Optional[Any], a_: Optional[Any], a_: List[Any], a_: Optional[int]=True ):
'''simple docstring'''
_snake_case : int = ()
for resnet in self.resnets:
_snake_case : Any = resnet(a_, a_, deterministic=a_ )
output_states += (hidden_states,)
if self.add_downsample:
_snake_case : Dict = self.downsamplers_a(a_ )
output_states += (hidden_states,)
return hidden_states, output_states
class lowercase( nn.Module ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 0.0
lowercase__ = 1
lowercase__ = 1
lowercase__ = True
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = jnp.floataa
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Any = []
_snake_case : List[Any] = []
for i in range(self.num_layers ):
_snake_case : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_snake_case : Optional[Any] = self.prev_output_channel if i == 0 else self.out_channels
_snake_case : Optional[int] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(a_ )
_snake_case : str = FlaxTransformeraDModel(
in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
attentions.append(a_ )
_snake_case : Any = resnets
_snake_case : Union[str, Any] = attentions
if self.add_upsample:
_snake_case : Dict = FlaxUpsampleaD(self.out_channels, dtype=self.dtype )
def __call__( self: Any, a_: int, a_: List[Any], a_: Union[str, Any], a_: Optional[Any], a_: List[Any]=True ):
'''simple docstring'''
for resnet, attn in zip(self.resnets, self.attentions ):
# pop res hidden states
_snake_case : List[str] = res_hidden_states_tuple[-1]
_snake_case : Any = res_hidden_states_tuple[:-1]
_snake_case : int = jnp.concatenate((hidden_states, res_hidden_states), axis=-1 )
_snake_case : Optional[Any] = resnet(a_, a_, deterministic=a_ )
_snake_case : str = attn(a_, a_, deterministic=a_ )
if self.add_upsample:
_snake_case : Dict = self.upsamplers_a(a_ )
return hidden_states
class lowercase( nn.Module ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
lowercase__ = 42
lowercase__ = 0.0
lowercase__ = 1
lowercase__ = True
lowercase__ = jnp.floataa
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : List[str] = []
for i in range(self.num_layers ):
_snake_case : Dict = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_snake_case : int = self.prev_output_channel if i == 0 else self.out_channels
_snake_case : List[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(a_ )
_snake_case : List[Any] = resnets
if self.add_upsample:
_snake_case : Optional[Any] = FlaxUpsampleaD(self.out_channels, dtype=self.dtype )
def __call__( self: str, a_: Any, a_: List[Any], a_: str, a_: Union[str, Any]=True ):
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
_snake_case : int = res_hidden_states_tuple[-1]
_snake_case : Dict = res_hidden_states_tuple[:-1]
_snake_case : List[Any] = jnp.concatenate((hidden_states, res_hidden_states), axis=-1 )
_snake_case : Any = resnet(a_, a_, deterministic=a_ )
if self.add_upsample:
_snake_case : int = self.upsamplers_a(a_ )
return hidden_states
class lowercase( nn.Module ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 0.0
lowercase__ = 1
lowercase__ = 1
lowercase__ = False
lowercase__ = False
lowercase__ = jnp.floataa
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
]
_snake_case : int = []
for _ in range(self.num_layers ):
_snake_case : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
attentions.append(a_ )
_snake_case : Tuple = FlaxResnetBlockaD(
in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
resnets.append(a_ )
_snake_case : Optional[Any] = resnets
_snake_case : List[str] = attentions
def __call__( self: Tuple, a_: Optional[int], a_: str, a_: Optional[int], a_: int=True ):
'''simple docstring'''
_snake_case : Dict = self.resnets[0](a_, a_ )
for attn, resnet in zip(self.attentions, self.resnets[1:] ):
_snake_case : str = attn(a_, a_, deterministic=a_ )
_snake_case : Union[str, Any] = resnet(a_, a_, deterministic=a_ )
return hidden_states
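# --- Added illustration (not part of the original module) ---
# The down blocks above push every intermediate hidden state onto
# `output_states`; the matching up blocks later pop those states and
# concatenate them on the channel axis before each resnet. A minimal sketch of
# that skip-connection bookkeeping in plain jax.numpy (toy shapes, no learned
# layers):
#
#     import jax.numpy as jnp
#
#     skips = ()
#     h = jnp.ones((1, 8, 8, 4))
#     for _ in range(2):                 # "down" path
#         h = h * 0.5                    # stand-in for resnet/attention
#         skips += (h,)
#     for _ in range(2):                 # "up" path
#         res, skips = skips[-1], skips[:-1]
#         h = jnp.concatenate((h, res), axis=-1)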
| 609 | 0 |
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Returns True if it is safe to place a queen at (row, column) given the
    current state of the board: no clash on the row, column, or diagonals.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """
    Builds the state-space tree row by row, backtracking whenever is_safe()
    fails; every completed board is recorded and printed.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Prints a board with Q for queens and . for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 718 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
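# --- Added usage sketch (not part of the original file) ---
# Assuming the standard `transformers` API, the config can be inspected like so:
#
#     from transformers import PoolFormerConfig
#     config = PoolFormerConfig()
#     print(config.hidden_sizes)        # [64, 128, 320, 512]
#     print(config.num_encoder_blocks)  # 4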
| 413 | 0 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier: str) -> list[str]:
    """Split a camel-cased name into its component words."""
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
    return [m.group(0) for m in matches]
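# Added illustration (not in the original script):
#     camel_case_split("TFBertModel")  ->  ["TF", "Bert", "Model"]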
def get_frameworks_table():
    """
    Generates a dataframe flagging, for each model type, which frameworks (PT/TF/Flax) support it and which
    processing class goes with it.
    """
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config', ''): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'

    data['processor'] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """
    Update the table of model class to (pipeline tag, auto class) without removing old keys if they don't exist
    anymore.
    """
    modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """
    Update the metadata for the Transformers repo.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        'huggingface/transformers-metadata', 'pipeline_tags.json', repo_type='dataset', token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            'model_class': model_classes,
            'pipeline_tag': [table[m][0] for m in model_classes],
            'auto_class': [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, 'frameworks.json'))
        tags_dataset.to_json(os.path.join(tmp_dir, 'pipeline_tags.json'))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = 'Update'

        upload_folder(
            repo_id='huggingface/transformers-metadata',
            folder_path=tmp_dir,
            repo_type='dataset',
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """
    Check that every pipeline task registered in `transformers` appears in `PIPELINE_TAGS_AND_AUTO_MODELS`.
    """
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ', '.join(missing)
        raise ValueError(
            'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
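# --- Added usage note (not part of the original script) ---
# Run from the repo root, e.g.:
#     python utils/update_metadata.py --token <hub_token> --commit_sha <sha>
#     python utils/update_metadata.py --check-only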
| 98 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.right: TreeNode | None = None
        self.left: TreeNode | None = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
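# --- Added non-interactive example (not part of the original script) ---
# build_tree() above reads from stdin; for quick testing a small tree can be
# wired up directly:
#
#     root = TreeNode(1)
#     root.left, root.right = TreeNode(2), TreeNode(3)
#     root.left.left, root.left.right = TreeNode(4), TreeNode(5)
#     in_order(root)   # prints: 4,2,5,1,3,
#     pre_order(root)  # prints: 1,2,4,5,3,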
| 98 | 1 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """
    Returns the expected number of distinct colours among `taken` balls,
    rounded to nine decimal places.
    """
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
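# --- Added sanity check (not part of the original solution) ---
# A quick Monte Carlo estimate of the same expectation (70 balls, 7 colours of
# 10); it should land near the analytic value printed above (~6.818741802).
import random


def monte_carlo(trials: int = 100_000, taken: int = 20) -> float:
    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    return sum(len(set(random.sample(balls, taken))) for _ in range(trials)) / trials


# print(monte_carlo())  # uncomment to run; ~6.82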
| 15 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n]"""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show frequency response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show phase response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
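# --- Added example filter (not part of the original module) ---
# Any object with a `process(sample) -> float` method satisfies FilterType.
# A two-tap moving average, for instance, acts as a gentle low-pass filter:
class MovingAverageFilter:
    def __init__(self) -> None:
        self.last_sample = 0.0

    def process(self, sample: float) -> float:
        out = 0.5 * (sample + self.last_sample)
        self.last_sample = sample
        return out


# show_frequency_response(MovingAverageFilter(), 48000)  # uncomment to plot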
| 15 | 1 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE__ ( a__ ):
def __init__( self: List[Any] , a: str , a: int , a: str , a: Dict , ) ->List[str]:
'''simple docstring'''
super().__init__()
a_ = value_function
a_ = unet
a_ = scheduler
a_ = env
a_ = env.get_dataset()
a_ = {}
for key in self.data.keys():
try:
a_ = self.data[key].mean()
except: # noqa: E722
pass
a_ = {}
for key in self.data.keys():
try:
a_ = self.data[key].std()
except: # noqa: E722
pass
a_ = env.observation_space.shape[0]
a_ = env.action_space.shape[0]
def _lowerCAmelCase ( self: Any , a: str , a: List[str]) ->str:
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def _lowerCAmelCase ( self: Dict , a: Union[str, Any] , a: str) ->int:
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def _lowerCAmelCase ( self: Union[str, Any] , a: Dict) ->Dict:
'''simple docstring'''
if type(SCREAMING_SNAKE_CASE_) is dict:
return {k: self.to_torch(SCREAMING_SNAKE_CASE_) for k, v in x_in.items()}
elif torch.is_tensor(SCREAMING_SNAKE_CASE_):
return x_in.to(self.unet.device)
return torch.tensor(SCREAMING_SNAKE_CASE_ , device=self.unet.device)
def _lowerCAmelCase ( self: Optional[int] , a: Optional[int] , a: Tuple , a: int) ->Any:
'''simple docstring'''
for key, val in cond.items():
a_ = val.clone()
return x_in
def _lowerCAmelCase ( self: List[Any] , a: List[str] , a: Dict , a: Optional[int] , a: Any) ->Optional[int]:
'''simple docstring'''
a_ = x.shape[0]
a_ = None
for i in tqdm.tqdm(self.scheduler.timesteps):
# create batch of timesteps to pass into model
a_ = torch.full((batch_size,) , SCREAMING_SNAKE_CASE_ , device=self.unet.device , dtype=torch.long)
for _ in range(SCREAMING_SNAKE_CASE_):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
a_ = self.value_function(x.permute(0 , 2 , 1) , SCREAMING_SNAKE_CASE_).sample
a_ = torch.autograd.grad([y.sum()] , [x])[0]
a_ = self.scheduler._get_variance(SCREAMING_SNAKE_CASE_)
a_ = torch.exp(0.5 * posterior_variance)
a_ = model_std * grad
a_ = 0
a_ = x.detach()
a_ = x + scale * grad
a_ = self.reset_xa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.action_dim)
a_ = self.unet(x.permute(0 , 2 , 1) , SCREAMING_SNAKE_CASE_).sample.permute(0 , 2 , 1)
# TODO: verify deprecation of this kwarg
a_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , predict_epsilon=SCREAMING_SNAKE_CASE_)['prev_sample']
# apply conditions to the trajectory (set the initial state)
a_ = self.reset_xa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.action_dim)
a_ = self.to_torch(SCREAMING_SNAKE_CASE_)
return x, y
def __call__( self: Dict , a: List[str] , a: str=64 , a: Tuple=32 , a: str=2 , a: List[Any]=0.1) ->Dict:
'''simple docstring'''
a_ = self.normalize(SCREAMING_SNAKE_CASE_ , "observations")
a_ = obs[None].repeat(SCREAMING_SNAKE_CASE_ , axis=0)
a_ = {0: self.to_torch(SCREAMING_SNAKE_CASE_)}
a_ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
a_ = randn_tensor(SCREAMING_SNAKE_CASE_ , device=self.unet.device)
a_ = self.reset_xa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.action_dim)
a_ = self.to_torch(SCREAMING_SNAKE_CASE_)
# run the diffusion process
a_ = self.run_diffusion(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# sort output trajectories by value
a_ = y.argsort(0 , descending=SCREAMING_SNAKE_CASE_).squeeze()
a_ = x[sorted_idx]
a_ = sorted_values[:, :, : self.action_dim]
a_ = actions.detach().cpu().numpy()
a_ = self.de_normalize(SCREAMING_SNAKE_CASE_ , key="actions")
# select the action with the highest value
if y is not None:
a_ = 0
else:
# if we didn't run value guiding, select a random action
a_ = np.random.randint(0 , SCREAMING_SNAKE_CASE_)
a_ = denorm_actions[selected_index, 0]
return denorm_actions
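# --- Added illustration (not part of the original pipeline) ---
# The core idea above is classifier-style guidance: at each denoising step,
# nudge the sample along the gradient of a learned value function. A minimal
# sketch of that update, assuming generic `value_fn`, `x` and `t` tensors:
#
#     import torch
#
#     def guided_step(x, t, value_fn, scale=0.1):
#         with torch.enable_grad():
#             x = x.detach().requires_grad_()
#             value = value_fn(x, t).sum()
#             grad = torch.autograd.grad(value, x)[0]
#         return x.detach() + scale * grad  # ascend the value estimate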
| 685 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''


def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between `start_prompt` and `end_prompt`, trimming surrounding empty lines.
    """
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    """
    Return the list of models supporting `task_guide` as a Markdown link list.
    """
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """
    For a given `task_guide`, check the model list in the generated tip for consistency and update it if needed.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->',
        end_prompt='<!--End of the generated tip-->',
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                ' to fix this.'
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
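# --- Added usage note (not part of the original script) ---
# Run from the repo root:
#     python utils/check_task_guides.py                      # check only
#     python utils/check_task_guides.py --fix_and_overwrite  # rewrite the tips in place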
| 40 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a ="""\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
a ="""\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
a ="""
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence'),
'references': datasets.Value('string' ,id='sequence'),
}) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
    def _compute(
        self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None,
        num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500,
        featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024,
        divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25,
    ):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features,
            p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id,
            max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed,
        )
        return out
| 337 |
def hexagonal_numbers(length: int) -> list[int]:
    """Returns a list of the first `length` hexagonal numbers, h(n) = n * (2 * n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
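# --- Added identity check (not part of the original module) ---
# Every hexagonal number is also triangular: h(n) = T(2n - 1) with
# T(m) = m * (m + 1) / 2. A quick verification over the first few terms:
assert all(h == (2 * n - 1) * (2 * n) // 2 for n, h in enumerate(hexagonal_numbers(10)))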
| 337 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = 'openai-gpt'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
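# --- Added usage sketch (not part of the original file) ---
# `attribute_map` lets the generic config names resolve to GPT-specific ones:
#
#     config = OpenAIGPTConfig(n_layer=6)
#     config.num_hidden_layers  # -> 6, resolved through attribute_map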
| 42 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_05 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_05)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
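# --- Added check (not part of the original script) ---
# The kernel function itself only needs numpy; an even ksize is bumped to the
# next odd value, so a request for 10 yields an 11x11 kernel:
#
#     k = gabor_filter_kernel(10, 8, 45, 10, 0, 0)
#     k.shape  # -> (11, 11)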
| 42 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 719 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase(metaclass=DummyObject):
    _backends = ["torch", "scipy"]
def __init__( self : Tuple , *__snake_case : List[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'scipy'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , *__snake_case : Optional[Any] , **__snake_case : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'scipy'] )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[Any] , *__snake_case : Optional[Any] , **__snake_case : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'scipy'] )
| 273 | 0 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
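# --- Added usage note (not part of the original script) ---
# Example invocation (the script filename is assumed here):
#     python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path bigbird_model.ckpt \
#         --big_bird_config_file config.json \
#         --pytorch_dump_path ./pytorch_model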
| 695 |
import os
def solution():
    """
    Finds the maximum total top-to-bottom path in the triangle stored in
    triangle.txt next to this script.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle = os.path.join(script_dir, "triangle.txt")

    with open(triangle) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
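# --- Added inline example (not part of the original solution) ---
# The same top-down DP on a small hard-coded triangle; the best path
# 3 -> 7 -> 4 -> 9 sums to 23:
def max_path_total(rows: list[list[int]]) -> int:
    rows = [row[:] for row in rows]  # avoid mutating the caller's data
    for i in range(1, len(rows)):
        for j in range(len(rows[i])):
            left = rows[i - 1][j - 1] if j > 0 else 0
            right = rows[i - 1][j] if j < len(rows[i - 1]) else 0
            rows[i][j] += max(left, right)
    return max(rows[-1])


assert max_path_total([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23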
| 132 | 0 |
'''simple docstring'''
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case_ : List[str] = current_set.copy()
for row_index, row in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case_ : int = row[0]
for column_index, column in enumerate(__SCREAMING_SNAKE_CASE ):
if magnitude == 0:
snake_case_ : Optional[int] = column
continue
snake_case_ : Optional[Any] = column / magnitude
# Subtract to cancel term
snake_case_ : List[Any] = current_set[0]
snake_case_ : Optional[Any] = [first_row]
snake_case_ : Union[str, Any] = current_set[1::]
for row in current_set:
snake_case_ : Optional[Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(__SCREAMING_SNAKE_CASE )
continue
for column_index in range(len(__SCREAMING_SNAKE_CASE ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(__SCREAMING_SNAKE_CASE )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
snake_case_ : Union[str, Any] = final_set[0]
snake_case_ : Dict = []
snake_case_ : Optional[Any] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
snake_case_ : Dict = simplify(__SCREAMING_SNAKE_CASE )
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
resultant[i].insert(0, current_first_column[i] )
resultant.insert(0, __SCREAMING_SNAKE_CASE )
snake_case_ : Any = resultant
return final_set
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(__SCREAMING_SNAKE_CASE ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
snake_case_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE ) + 1
if any(len(__SCREAMING_SNAKE_CASE ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(__SCREAMING_SNAKE_CASE, (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(__SCREAMING_SNAKE_CASE ) == 1:
return [equations[0][-1] / equations[0][0]]
snake_case_ : int = equations.copy()
if any(0 in row for row in data_set ):
snake_case_ : Tuple = data_set.copy()
snake_case_ : List[Any] = []
for row_index, row in enumerate(__SCREAMING_SNAKE_CASE ):
if 0 not in row:
snake_case_ : Tuple = data_set.pop(__SCREAMING_SNAKE_CASE )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0, __SCREAMING_SNAKE_CASE )
snake_case_ : Dict = data_set.copy()
snake_case_ : Tuple = simplify(__SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = simplified[::-1]
snake_case_ : list = []
for row in simplified:
snake_case_ : Tuple = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
snake_case_ : Optional[Any] = row.copy()[: len(__SCREAMING_SNAKE_CASE ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(__SCREAMING_SNAKE_CASE ) == 0:
solutions.append(0 )
continue
snake_case_ : List[Any] = temp_row[1::]
snake_case_ : Optional[Any] = temp_row[::-1]
for column_index, column in enumerate(__SCREAMING_SNAKE_CASE ):
current_solution -= column * solutions[column_index]
solutions.append(__SCREAMING_SNAKE_CASE )
snake_case_ : Optional[Any] = []
for item in solutions:
final.append(float(round(__SCREAMING_SNAKE_CASE, 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
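# --- Added cross-check (not part of the original module) ---
# The same 5x5 system solved with numpy for comparison; the expected result is
# [-1.0, 0.0, 1.0, 2.0, 3.0] in variable order x1..x5:
#
#     import numpy as np
#
#     a = np.array([row[:-1] for row in eq], dtype=float)
#     b = np.array([row[-1] for row in eq], dtype=float)
#     np.linalg.solve(a, b)  # -> array([-1., 0., 1., 2., 3.])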
| 92 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 92 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowerCamelCase :
"""simple docstring"""
UpperCAmelCase_ = OPTConfig
UpperCAmelCase_ = {}
UpperCAmelCase_ = "gelu"
def __init__( self : str, _UpperCAmelCase : Optional[int], _UpperCAmelCase : Union[str, Any]=1_3, _UpperCAmelCase : str=7, _UpperCAmelCase : Dict=True, _UpperCAmelCase : Union[str, Any]=False, _UpperCAmelCase : Dict=9_9, _UpperCAmelCase : List[Any]=1_6, _UpperCAmelCase : Any=2, _UpperCAmelCase : Dict=4, _UpperCAmelCase : int=4, _UpperCAmelCase : Union[str, Any]="gelu", _UpperCAmelCase : Any=0.1, _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : Optional[Any]=2_0, _UpperCAmelCase : str=2, _UpperCAmelCase : str=1, _UpperCAmelCase : str=0, _UpperCAmelCase : Union[str, Any]=1_6, _UpperCAmelCase : str=1_6, ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = seq_length
SCREAMING_SNAKE_CASE__ : Any = is_training
SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Union[str, Any] = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = pad_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : Any = embed_dim
SCREAMING_SNAKE_CASE__ : List[Any] = word_embed_proj_dim
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def A_ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
SCREAMING_SNAKE_CASE__ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
SCREAMING_SNAKE_CASE__ : List[Any] = tf.concat([input_ids, eos_tensor], axis=1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.config_cls(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=a_, **self.config_updates, )
SCREAMING_SNAKE_CASE__ : List[str] = prepare_opt_inputs_dict(a_, a_ )
return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """simple docstring"""
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    """simple docstring"""
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    """simple docstring"""

    vocab_size = 99

    def _get_config_and_data(self):
        """simple docstring"""
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self):
        """simple docstring"""
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        """simple docstring"""
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    """simple docstring"""

    @property
    def prompts(self):
        """simple docstring"""
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        """simple docstring"""
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        """simple docstring"""
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int32)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        """simple docstring"""
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 663 | import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class FSMTModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        """simple docstring"""
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        """simple docstring"""
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        """simple docstring"""
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
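# The `calculate_bleu` helper above is imported from a local `utils` module that is not
# shown here. A minimal sketch of what it is assumed to do, using sacrebleu (the exact
# implementation in the original repo may differ):
#
#   import sacrebleu
#
#   def calculate_bleu(output_lns, refs_lns):
#       """Compute corpus-level BLEU and return it under the "bleu" key."""
#       return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}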
| 85 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(reversed_block_out_channels) - 1)]
            is_final_block = i == len(reversed_block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ):
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlockaD):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train, )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNetaDConditionOutput(sample=sample)
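# A minimal usage sketch (assumed shapes; not part of the original module): initialize
# the parameters with a PRNG key and run one denoising step on random inputs.
#
#   model = FlaxUNetaDConditionModel(sample_size=32)
#   params = model.init_weights(jax.random.PRNGKey(0))
#   sample = jnp.zeros((1, 4, 32, 32))
#   timesteps = jnp.ones((1,), dtype=jnp.int32)
#   encoder_hidden_states = jnp.zeros((1, 1, 1280))
#   out = model.apply({"params": params}, sample, timesteps, encoder_hidden_states)
#   # out.sample has the same shape as `sample`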
| 303 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
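# Shape sketch (illustrative tensor, not from a real checkpoint): for hidden_size=4, the
# fused qkv weight has shape (12, 4) and is sliced into three (4, 4) blocks in
# query/key/value order, exactly as done above.
#
#   import torch
#   hidden = 4
#   qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
#   q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]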
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
lowerCamelCase : Tuple = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 303 | 1 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """simple docstring"""
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device, )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 18 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : Union[str, Any] = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[Any] = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
A : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 349 | 0 |
'''simple docstring'''
def is_palindrome(head) -> bool:
    """Check whether a singly linked list is a palindrome by reversing its second half."""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head) -> bool:
    """Check whether a singly linked list is a palindrome using a stack of the second half."""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True
def is_palindrome_dict(head) -> bool:
    """Check whether a singly linked list is a palindrome using a dict of value positions."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
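# The functions above assume a singly linked list node with `val` and `next`
# attributes, which is not defined in this file. A minimal sketch of such a node
# and a quick check (names are illustrative):
#
#   class ListNode:
#       def __init__(self, val):
#           self.val = val
#           self.next = None
#
#   def build(values):
#       head = tail = None
#       for v in values:
#           node = ListNode(v)
#           if head is None:
#               head = tail = node
#           else:
#               tail.next = tail = node
#       return head
#
#   assert is_palindrome_stack(build([1, 2, 2, 1]))
#   assert not is_palindrome_dict(build([1, 2, 3]))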
| 714 |
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    """simple docstring"""
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path)

    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
UpperCamelCase__ = F'''layers_{str(__A )}'''
# Self-Attention
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
UpperCamelCase__ = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
UpperCamelCase__ = flax_model.params["encoder"]["block"][str(__A )]["layer"]
UpperCamelCase__ = tax_attention_key
UpperCamelCase__ = tax_attention_out
UpperCamelCase__ = tax_attention_query
UpperCamelCase__ = tax_attention_value
UpperCamelCase__ = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase__ = tax_global_layer_norm
if split_mlp_wi:
UpperCamelCase__ = tax_mlp_wi_a
UpperCamelCase__ = tax_mlp_wi_a
else:
UpperCamelCase__ = tax_mlp_wi
UpperCamelCase__ = tax_mlp_wo
UpperCamelCase__ = tax_mlp_layer_norm
UpperCamelCase__ = flax_model_encoder_layer_block
# Only for layer 0:
UpperCamelCase__ = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
UpperCamelCase__ = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
UpperCamelCase__ = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
UpperCamelCase__ = tax_encoder_global_rel_embedding
# Assigning
UpperCamelCase__ = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
UpperCamelCase__ = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
UpperCamelCase__ = F'''layers_{str(__A )}'''
# Self-Attention
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
UpperCamelCase__ = tax_enc_dec_attention_module["key"]["kernel"]
UpperCamelCase__ = tax_enc_dec_attention_module["out"]["kernel"]
UpperCamelCase__ = tax_enc_dec_attention_module["query"]["kernel"]
UpperCamelCase__ = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
UpperCamelCase__ = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
UpperCamelCase__ = flax_model.params["decoder"]["block"][str(__A )]["layer"]
UpperCamelCase__ = tax_attention_key
UpperCamelCase__ = tax_attention_out
UpperCamelCase__ = tax_attention_query
UpperCamelCase__ = tax_attention_value
UpperCamelCase__ = tax_pre_attention_layer_norm
UpperCamelCase__ = tax_enc_dec_attention_key
UpperCamelCase__ = tax_enc_dec_attention_out
UpperCamelCase__ = tax_enc_dec_attention_query
UpperCamelCase__ = tax_enc_dec_attention_value
UpperCamelCase__ = tax_cross_layer_norm
if split_mlp_wi:
UpperCamelCase__ = tax_mlp_wi_a
UpperCamelCase__ = tax_mlp_wi_a
else:
UpperCamelCase__ = tax_mlp_wi
UpperCamelCase__ = tax_mlp_wo
UpperCamelCase__ = txa_mlp_layer_norm
UpperCamelCase__ = flax_model_decoder_layer_block
# Decoder Normalization
UpperCamelCase__ = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
UpperCamelCase__ = txa_decoder_norm
# Only for layer 0:
UpperCamelCase__ = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
UpperCamelCase__ = tax_decoder_rel_embedding
# Token Embeddings
UpperCamelCase__ = tax_model["target"]["token_embedder"]["embedding"]
UpperCamelCase__ = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
UpperCamelCase__ = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path)
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
a__ : Optional[Any] = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 223 | 0 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """simple docstring"""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """simple docstring"""
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : Dict=None , snake_case_ : str=None ) -> int:
'''simple docstring'''
assert isinstance(snake_case_ , snake_case_ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCAmelCase_ = old_checkpoint[path]
UpperCAmelCase_ = old_tensor.shape[0] // 3
UpperCAmelCase_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCAmelCase_ = old_tensor.shape[0] // config['''num_head_channels'''] // 3
UpperCAmelCase_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCAmelCase_ = old_tensor.split(channels // num_heads , dim=1 )
UpperCAmelCase_ = query.reshape(snake_case_ )
UpperCAmelCase_ = key.reshape(snake_case_ )
UpperCAmelCase_ = value.reshape(snake_case_ )
for path in paths:
UpperCAmelCase_ = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCAmelCase_ = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
UpperCAmelCase_ = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
UpperCAmelCase_ = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCAmelCase_ = new_path.replace(replacement["old"] , replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCAmelCase_ = old_checkpoint[path['''old''']][:, :, 0]
else:
UpperCAmelCase_ = old_checkpoint[path['''old''']]
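# Minimal, self-contained sketch of the fused-qkv split performed above: a
# (3*C, C) qkv kernel is reshaped per attention head and split into
# query/key/value blocks (sizes here are illustrative only):
_demo_C, _demo_heads = 8, 2
_demo_qkv = torch.randn(3 * _demo_C, _demo_C)
_demo_per_head = _demo_qkv.reshape((_demo_heads, 3 * _demo_C // _demo_heads) + _demo_qkv.shape[1:])
_demo_q, _demo_k, _demo_v = _demo_per_head.split(_demo_C // _demo_heads, dim=1)
assert _demo_q.reshape(-1, _demo_C).shape == (_demo_C, _demo_C)  # q/k/v each recover a (C, C) kernel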
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = {}
UpperCAmelCase_ = checkpoint['''time_embed.0.weight''']
UpperCAmelCase_ = checkpoint['''time_embed.0.bias''']
UpperCAmelCase_ = checkpoint['''time_embed.2.weight''']
UpperCAmelCase_ = checkpoint['''time_embed.2.bias''']
UpperCAmelCase_ = checkpoint['''input_blocks.0.0.weight''']
UpperCAmelCase_ = checkpoint['''input_blocks.0.0.bias''']
UpperCAmelCase_ = checkpoint['''out.0.weight''']
UpperCAmelCase_ = checkpoint['''out.0.bias''']
UpperCAmelCase_ = checkpoint['''out.2.weight''']
UpperCAmelCase_ = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(snake_case_ )
}
# Retrieves the keys for the middle blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(snake_case_ )
}
# Retrieves the keys for the output blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(snake_case_ )
}
for i in range(1 , snake_case_ ):
UpperCAmelCase_ = (i - 1) // (config['''num_res_blocks'''] + 1)
UpperCAmelCase_ = (i - 1) % (config['''num_res_blocks'''] + 1)
UpperCAmelCase_ = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
UpperCAmelCase_ = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
UpperCAmelCase_ = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
UpperCAmelCase_ = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
UpperCAmelCase_ = renew_resnet_paths(snake_case_ )
UpperCAmelCase_ = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
UpperCAmelCase_ = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path, resnet_op] , config=snake_case_ )
if len(snake_case_ ):
UpperCAmelCase_ = renew_attention_paths(snake_case_ )
UpperCAmelCase_ = {
'''old''': f"""input_blocks.{i}.1""",
'''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase_ = {
f"""input_blocks.{i}.1.qkv.bias""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case_ , config=snake_case_ , )
UpperCAmelCase_ = middle_blocks[0]
UpperCAmelCase_ = middle_blocks[1]
UpperCAmelCase_ = middle_blocks[2]
UpperCAmelCase_ = renew_resnet_paths(snake_case_ )
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ )
UpperCAmelCase_ = renew_resnet_paths(snake_case_ )
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ )
UpperCAmelCase_ = renew_attention_paths(snake_case_ )
UpperCAmelCase_ = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , attention_paths_to_split=snake_case_ , config=snake_case_ )
for i in range(snake_case_ ):
UpperCAmelCase_ = i // (config['''num_res_blocks'''] + 1)
UpperCAmelCase_ = i % (config['''num_res_blocks'''] + 1)
UpperCAmelCase_ = [shave_segments(snake_case_ , 2 ) for name in output_blocks[i]]
UpperCAmelCase_ = {}
for layer in output_block_layers:
            UpperCAmelCase_ , UpperCAmelCase_ = layer.split("." )[0], shave_segments(snake_case_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case_ )
else:
UpperCAmelCase_ = [layer_name]
if len(snake_case_ ) > 1:
UpperCAmelCase_ = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
UpperCAmelCase_ = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
UpperCAmelCase_ = renew_resnet_paths(snake_case_ )
UpperCAmelCase_ = renew_resnet_paths(snake_case_ )
UpperCAmelCase_ = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCAmelCase_ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
UpperCAmelCase_ = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
UpperCAmelCase_ = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(snake_case_ ) == 2:
UpperCAmelCase_ = []
if len(snake_case_ ):
UpperCAmelCase_ = renew_attention_paths(snake_case_ )
UpperCAmelCase_ = {
'''old''': f"""output_blocks.{i}.1""",
'''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase_ = {
f"""output_blocks.{i}.1.qkv.bias""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=snake_case_ , )
else:
UpperCAmelCase_ = renew_resnet_paths(snake_case_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCAmelCase_ = '''.'''.join(["output_blocks", str(snake_case_ ), path["old"]] )
UpperCAmelCase_ = '''.'''.join(["up_blocks", str(snake_case_ ), "resnets", str(snake_case_ ), path["new"]] )
UpperCAmelCase_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[Any] =argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
SCREAMING_SNAKE_CASE_: Any =parser.parse_args()
SCREAMING_SNAKE_CASE_: Optional[int] =torch.load(args.checkpoint_path)
with open(args.config_file) as f:
SCREAMING_SNAKE_CASE_: List[str] =json.loads(f.read())
SCREAMING_SNAKE_CASE_: Tuple =convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
SCREAMING_SNAKE_CASE_: Dict =UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
SCREAMING_SNAKE_CASE_: Any =DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE_: Optional[Any] =VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE_: Union[str, Any] =LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
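# Example invocation (hypothetical local paths; the script file name is an
# assumption, not taken from this file):
#
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted_ldm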
| 78 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 678 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_lowerCAmelCase : Optional[int] = '''
Human: <<task>>
Assistant: '''
_lowerCAmelCase : Union[str, Any] = '''huggingface-tools/default-prompts'''
_lowerCAmelCase : Optional[int] = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="run" ) -> Any:
'''simple docstring'''
if prompt_or_repo_id is None:
_lowerCamelCase : str = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _lowerCamelCase ) is not None:
return prompt_or_repo_id
_lowerCamelCase : Optional[int] = cached_file(
_lowerCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
        return f.read()
| 386 |
"""simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_lowerCamelCase )
_lowerCamelCase : List[Any] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase )
_lowerCamelCase : str = checkpoints.load_tax_checkpoint(_lowerCamelCase )
_lowerCamelCase : str = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
_lowerCamelCase : Optional[int] = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_lowerCamelCase : Optional[Any] = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : Optional[int] = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
_lowerCamelCase : Tuple = F"""layers_{str(_lowerCamelCase )}"""
# Self-Attention
_lowerCamelCase : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
_lowerCamelCase : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
_lowerCamelCase : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
_lowerCamelCase : int = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : Optional[int] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
_lowerCamelCase : Any = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
_lowerCamelCase : Any = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_lowerCamelCase : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_lowerCamelCase : List[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
_lowerCamelCase : Optional[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_lowerCamelCase : List[str] = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_lowerCamelCase : Tuple = flax_model.params["encoder"]["block"][str(_lowerCamelCase )]["layer"]
_lowerCamelCase : int = tax_attention_key
_lowerCamelCase : Union[str, Any] = tax_attention_out
_lowerCamelCase : str = tax_attention_query
_lowerCamelCase : Dict = tax_attention_value
_lowerCamelCase : str = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : Union[str, Any] = tax_global_layer_norm
if split_mlp_wi:
_lowerCamelCase : Optional[Any] = tax_mlp_wi_a
_lowerCamelCase : int = tax_mlp_wi_a
else:
_lowerCamelCase : str = tax_mlp_wi
_lowerCamelCase : Optional[int] = tax_mlp_wo
_lowerCamelCase : List[str] = tax_mlp_layer_norm
_lowerCamelCase : Tuple = flax_model_encoder_layer_block
# Only for layer 0:
_lowerCamelCase : Optional[int] = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
_lowerCamelCase : int = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCamelCase : int = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
_lowerCamelCase : List[str] = tax_encoder_global_rel_embedding
# Assigning
_lowerCamelCase : List[str] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
_lowerCamelCase : int = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_lowerCamelCase : str = F"""layers_{str(_lowerCamelCase )}"""
# Self-Attention
_lowerCamelCase : Optional[int] = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
_lowerCamelCase : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
_lowerCamelCase : Dict = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
_lowerCamelCase : Any = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
_lowerCamelCase : Tuple = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
_lowerCamelCase : Optional[int] = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
_lowerCamelCase : List[str] = tax_enc_dec_attention_module["key"]["kernel"]
_lowerCamelCase : Tuple = tax_enc_dec_attention_module["out"]["kernel"]
_lowerCamelCase : Union[str, Any] = tax_enc_dec_attention_module["query"]["kernel"]
_lowerCamelCase : Any = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
_lowerCamelCase : int = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
_lowerCamelCase : Optional[int] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
_lowerCamelCase : List[str] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
_lowerCamelCase : str = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
_lowerCamelCase : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
_lowerCamelCase : Tuple = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
_lowerCamelCase : str = flax_model.params["decoder"]["block"][str(_lowerCamelCase )]["layer"]
_lowerCamelCase : Tuple = tax_attention_key
_lowerCamelCase : List[str] = tax_attention_out
_lowerCamelCase : Union[str, Any] = tax_attention_query
_lowerCamelCase : Optional[int] = tax_attention_value
_lowerCamelCase : Optional[Any] = tax_pre_attention_layer_norm
_lowerCamelCase : Tuple = tax_enc_dec_attention_key
_lowerCamelCase : List[str] = tax_enc_dec_attention_out
_lowerCamelCase : Tuple = tax_enc_dec_attention_query
_lowerCamelCase : Tuple = tax_enc_dec_attention_value
_lowerCamelCase : Optional[Any] = tax_cross_layer_norm
if split_mlp_wi:
_lowerCamelCase : List[Any] = tax_mlp_wi_a
_lowerCamelCase : List[Any] = tax_mlp_wi_a
else:
_lowerCamelCase : Dict = tax_mlp_wi
_lowerCamelCase : Union[str, Any] = tax_mlp_wo
_lowerCamelCase : Dict = txa_mlp_layer_norm
_lowerCamelCase : Optional[int] = flax_model_decoder_layer_block
# Decoder Normalization
_lowerCamelCase : Tuple = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
_lowerCamelCase : Union[str, Any] = txa_decoder_norm
# Only for layer 0:
_lowerCamelCase : int = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
_lowerCamelCase : List[Any] = tax_decoder_rel_embedding
# Token Embeddings
_lowerCamelCase : Union[str, Any] = tax_model["target"]["token_embedder"]["embedding"]
_lowerCamelCase : Any = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_lowerCamelCase : Tuple = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCamelCase )
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
_lowerCAmelCase : int = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 386 | 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
lowercase__ = emb.weight.data
return lin_layer
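# Self-contained sketch of the embedding-to-linear tying above: the lm_head
# reuses the (shared) embedding matrix as its projection weight (sizes are
# illustrative only):
_demo_emb = nn.Embedding(10, 4)
_demo_lm_head = nn.Linear(4, 10, bias=False)
_demo_lm_head.weight.data = _demo_emb.weight.data
assert torch.equal(_demo_lm_head.weight, _demo_emb.weight)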
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
lowercase__ = {}
for old_key in state_dict.keys():
lowercase__ = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowercase__ = key.replace('''moe_layer.experts.0''' , f'ffn.experts.expert_{expert_idx}' )
else:
lowercase__ = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
lowercase__ = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
lowercase__ = key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
lowercase__ = key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
lowercase__ = key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
lowercase__ = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
lowercase__ = key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
lowercase__ = state_dict[old_key]
return new_dict
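# Worked example of the renaming rules above, applied by hand to a
# representative fairseq expert key (expert index 7 is illustrative):
_demo_old_key = "layers.0.moe_layer.experts.0.fc1.weight"
_demo_new_key = _demo_old_key.replace("moe_layer.experts.0", "ffn.experts.expert_7")
assert _demo_new_key == "layers.0.ffn.experts.expert_7.fc1.weight"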
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = WEIGHTS_NAME ):
"""simple docstring"""
lowercase__ = []
lowercase__ = 0
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
for expert in range(SCREAMING_SNAKE_CASE ):
lowercase__ = switch_checkpoint_path + f'-rank-{expert}.pt'
if os.path.isfile(SCREAMING_SNAKE_CASE ):
lowercase__ = torch.load(SCREAMING_SNAKE_CASE )['''model''']
remove_ignore_keys_(SCREAMING_SNAKE_CASE )
lowercase__ = rename_fairseq_keys(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = os.path.join(
SCREAMING_SNAKE_CASE , weights_name.replace('''.bin''' , f'-{len(SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin' ) )
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(SCREAMING_SNAKE_CASE )[0]].dtype )
# Add the last block
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , weights_name.replace('''.bin''' , f'-{len(SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin' ) )
lowercase__ = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(SCREAMING_SNAKE_CASE )
lowercase__ = rename_fairseq_keys(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(SCREAMING_SNAKE_CASE ) == 1:
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Otherwise, let's build the index
lowercase__ = {}
for idx, shard in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ = weights_name.replace('''.bin''' , f'-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE ):05d}.bin' )
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , weights_name.replace('''.bin''' , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
for key in shard:
lowercase__ = shard_file
# Add the metadata
lowercase__ = {'''total_size''': total_size}
lowercase__ = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , '''w''' , encoding='''utf-8''' ) as f:
lowercase__ = json.dumps(SCREAMING_SNAKE_CASE , indent=2 , sort_keys=SCREAMING_SNAKE_CASE ) + '''\n'''
f.write(SCREAMING_SNAKE_CASE )
return metadata, index
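# For reference, the (metadata, index) pair returned above follows the
# standard sharded-checkpoint index layout, e.g. (entries illustrative only):
#
#   {
#       "metadata": {"total_size": 123456789},
#       "weight_map": {"decoder.layers.0.ffn.fc1.weight": "pytorch_model-00001-of-00002.bin"},
#   }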
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
lowerCAmelCase = parser.parse_args()
lowerCAmelCase, lowerCAmelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
lowerCAmelCase = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
lowerCAmelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 43 |
lowerCAmelCase = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
lowerCAmelCase = {value: key for key, value in encode_dict.items()}
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = ''''''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if set(SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
lowercase__ = ''''''
for word in coded.split():
while len(SCREAMING_SNAKE_CASE ) != 0:
decoded += decode_dict[word[:5]]
lowercase__ = word[5:]
decoded += " "
return decoded.strip()
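# Worked example of the cipher above (using the dictionaries' original names,
# as referenced inside the encode/decode bodies): each letter maps to a
# five-symbol A/B code, so "ab" encodes to "AAAAA" + "AAAAB".
#
#   encode_dict["a"] + encode_dict["b"]  ->  "AAAAAAAAAB"
#   decode_dict["AAAAA"]                 ->  "a"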
if __name__ == "__main__":
from doctest import testmod
testmod()
| 43 | 1 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_A : int = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCamelCase_ ( snake_case_ : Dict ) -> List[Any]:
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCamelCase_ ( snake_case_ : int , snake_case_ : Any , snake_case_ : Union[str, Any] ) -> str:
'''simple docstring'''
return max(metric_fn(snake_case_ , snake_case_ ) for gt in ground_truths )
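# Self-contained sketch of the max-over-references reduction above: the best
# score across all acceptable gold answers is kept (a trivial exact-match
# stand-in is used here for illustration):
def _demo_em(prediction, ground_truth):
    return float(prediction.lower() == ground_truth.lower())

assert max(_demo_em("paris", gt) for gt in ["Paris", "paris, france"]) == 1.0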
def UpperCamelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ) -> str:
'''simple docstring'''
__lowerCAmelCase = [line.strip() for line in open(snake_case_ , """r""" ).readlines()]
__lowerCAmelCase = []
if args.gold_data_mode == "qa":
__lowerCAmelCase = pd.read_csv(snake_case_ , sep="""\t""" , header=snake_case_ )
for answer_list in data[1]:
__lowerCAmelCase = ast.literal_eval(snake_case_ )
answers.append(snake_case_ )
else:
__lowerCAmelCase = [line.strip() for line in open(snake_case_ , """r""" ).readlines()]
__lowerCAmelCase = [[reference] for reference in references]
__lowerCAmelCase = __lowerCAmelCase = __lowerCAmelCase = 0
for prediction, ground_truths in zip(snake_case_ , snake_case_ ):
total += 1
em += metric_max_over_ground_truths(snake_case_ , snake_case_ , snake_case_ )
fa += metric_max_over_ground_truths(snake_case_ , snake_case_ , snake_case_ )
__lowerCAmelCase = 1_00.0 * em / total
__lowerCAmelCase = 1_00.0 * fa / total
logger.info(f"""F1: {fa:.2f}""" )
logger.info(f"""EM: {em:.2f}""" )
def UpperCamelCase_ ( snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : List[Any] ) -> int:
'''simple docstring'''
__lowerCAmelCase = args.k
__lowerCAmelCase = [line.strip() for line in open(snake_case_ , """r""" ).readlines()]
__lowerCAmelCase = [line.strip() for line in open(snake_case_ , """r""" ).readlines()]
__lowerCAmelCase = __lowerCAmelCase = 0
for hypo, reference in zip(snake_case_ , snake_case_ ):
__lowerCAmelCase = set(hypo.split("""\t""" )[:k] )
__lowerCAmelCase = set(reference.split("""\t""" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
__lowerCAmelCase = 1_00.0 * em / total
logger.info(f"""Precision@{k}: {em: .2f}""" )
def UpperCamelCase_ ( snake_case_ : str , snake_case_ : Dict , snake_case_ : Tuple ) -> int:
'''simple docstring'''
def strip_title(snake_case_ : List[str] ):
if title.startswith("""\"""" ):
__lowerCAmelCase = title[1:]
if title.endswith("""\"""" ):
__lowerCAmelCase = title[:-1]
return title
__lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
snake_case_ , return_tensors="""pt""" , padding=snake_case_ , truncation=snake_case_ , )["""input_ids"""].to(args.device )
__lowerCAmelCase = rag_model.rag.question_encoder(snake_case_ )
__lowerCAmelCase = question_enc_outputs[0]
__lowerCAmelCase = rag_model.retriever(
snake_case_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
__lowerCAmelCase = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
__lowerCAmelCase = []
for docs in all_docs:
__lowerCAmelCase = [strip_title(snake_case_ ) for title in docs["""title"""]]
provenance_strings.append("""\t""".join(snake_case_ ) )
return provenance_strings
def UpperCamelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
with torch.no_grad():
__lowerCAmelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
snake_case_ , return_tensors="""pt""" , padding=snake_case_ , truncation=snake_case_ )
__lowerCAmelCase = inputs_dict.input_ids.to(args.device )
__lowerCAmelCase = inputs_dict.attention_mask.to(args.device )
__lowerCAmelCase = rag_model.generate( # rag_model overwrites generate
snake_case_ , attention_mask=snake_case_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=snake_case_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
__lowerCAmelCase = rag_model.retriever.generator_tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
if args.print_predictions:
for q, a in zip(snake_case_ , snake_case_ ):
logger.info("""Q: {} - A: {}""".format(snake_case_ , snake_case_ ) )
return answers
def UpperCamelCase_ ( ) -> int:
'''simple docstring'''
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=snake_case_ , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=snake_case_ , choices=["""exact""", """compressed""", """legacy"""] , type=snake_case_ , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=snake_case_ , type=snake_case_ , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=snake_case_ , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=snake_case_ , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=snake_case_ , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=snake_case_ , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=snake_case_ , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=snake_case_ , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=snake_case_ , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=snake_case_ , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=50 , type=snake_case_ , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
return args
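# Example invocation (hypothetical paths; facebook/rag-token-nq is a real RAG
# checkpoint, used here purely for illustration):
#
#   python eval_rag.py --model_name_or_path facebook/rag-token-nq \
#       --evaluation_set path/to/test.source --gold_data_path path/to/gold.tsv \
#       --predictions_path predictions.txt --eval_mode e2e --gold_data_mode qa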
def UpperCamelCase_ ( snake_case_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase = {}
if args.model_type is None:
__lowerCAmelCase = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("""rag""" ):
__lowerCAmelCase = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
__lowerCAmelCase = args.n_docs
if args.index_name is not None:
__lowerCAmelCase = args.index_name
if args.index_path is not None:
__lowerCAmelCase = args.index_path
else:
__lowerCAmelCase = BartForConditionalGeneration
__lowerCAmelCase = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("""Evaluate the following checkpoints: %s""" , snake_case_ )
__lowerCAmelCase = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
__lowerCAmelCase = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
score_fn(snake_case_ , args.predictions_path , args.gold_data_path )
continue
logger.info("""***** Running evaluation for {} *****""".format(snake_case_ ) )
logger.info(""" Batch size = %d""" , args.eval_batch_size )
logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
if args.model_type.startswith("""rag""" ):
__lowerCAmelCase = RagRetriever.from_pretrained(snake_case_ , **snake_case_ )
__lowerCAmelCase = model_class.from_pretrained(snake_case_ , retriever=snake_case_ , **snake_case_ )
model.retriever.init_retrieval()
else:
__lowerCAmelCase = model_class.from_pretrained(snake_case_ , **snake_case_ )
model.to(args.device )
with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
__lowerCAmelCase = []
for line in tqdm(snake_case_ ):
questions.append(line.strip() )
if len(snake_case_ ) == args.eval_batch_size:
__lowerCAmelCase = evaluate_batch_fn(snake_case_ , snake_case_ , snake_case_ )
preds_file.write("""\n""".join(snake_case_ ) + """\n""" )
preds_file.flush()
__lowerCAmelCase = []
if len(snake_case_ ) > 0:
__lowerCAmelCase = evaluate_batch_fn(snake_case_ , snake_case_ , snake_case_ )
preds_file.write("""\n""".join(snake_case_ ) )
preds_file.flush()
score_fn(snake_case_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
_A : int = get_args()
main(args)
| 703 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_A : Optional[Any] = logging.get_logger(__name__)
_A : Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def UpperCamelCase_ ( snake_case_ : Dict , snake_case_ : Optional[Any] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
for attribute in key.split(""".""" ):
__lowerCAmelCase = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
__lowerCAmelCase = getattr(snake_case_ , snake_case_ ).shape
else:
__lowerCAmelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__lowerCAmelCase = value
elif weight_type == "weight_g":
__lowerCAmelCase = value
elif weight_type == "weight_v":
__lowerCAmelCase = value
elif weight_type == "bias":
__lowerCAmelCase = value
else:
__lowerCAmelCase = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def UpperCamelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : Optional[Any] ) -> int:
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = fairseq_model.state_dict()
__lowerCAmelCase = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == """group""" , )
__lowerCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCAmelCase = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
__lowerCAmelCase = True
if "*" in mapped_key:
__lowerCAmelCase = name.split(snake_case_ )[0].split(""".""" )[-2]
__lowerCAmelCase = mapped_key.replace("""*""" , snake_case_ )
if "weight_g" in name:
__lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
__lowerCAmelCase = """weight_v"""
elif "weight" in name:
__lowerCAmelCase = """weight"""
elif "bias" in name:
__lowerCAmelCase = """bias"""
else:
__lowerCAmelCase = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase_ ( snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : int ) -> int:
'''simple docstring'''
__lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
__lowerCAmelCase = name.split(""".""" )
__lowerCAmelCase = int(items[0] )
__lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowerCAmelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowerCAmelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__lowerCAmelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__lowerCAmelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def UpperCamelCase_ ( snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : Dict=True ) -> List[str]:
'''simple docstring'''
if config_path is not None:
__lowerCAmelCase = HubertConfig.from_pretrained(snake_case_ )
else:
__lowerCAmelCase = HubertConfig()
if is_finetuned:
if dict_path:
__lowerCAmelCase = Dictionary.load(snake_case_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowerCAmelCase = target_dict.pad_index
__lowerCAmelCase = target_dict.bos_index
__lowerCAmelCase = target_dict.eos_index
__lowerCAmelCase = len(target_dict.symbols )
__lowerCAmelCase = os.path.join(snake_case_ , """vocab.json""" )
if not os.path.isdir(snake_case_ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(snake_case_ ) )
return
os.makedirs(snake_case_ , exist_ok=snake_case_ )
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , snake_case_ )
__lowerCAmelCase = WavaVecaCTCTokenizer(
snake_case_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=snake_case_ , )
__lowerCAmelCase = True if config.feat_extract_norm == """layer""" else False
__lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ , )
__lowerCAmelCase = WavaVecaProcessor(feature_extractor=snake_case_ , tokenizer=snake_case_ )
processor.save_pretrained(snake_case_ )
__lowerCAmelCase = HubertForCTC(snake_case_ )
else:
__lowerCAmelCase = HubertModel(snake_case_ )
if is_finetuned:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__lowerCAmelCase = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
_A : Any = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_A : str = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
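# Example invocation (hypothetical paths; the script file name is an assumption):
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned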
| 330 | 0 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
A : Optional[Any] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
A : str = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
A : int = 'zero2'
A : Tuple = 'zero3'
A : str = [ZEROa, ZEROa]
def snake_case__ ( _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : Any ):
"""simple docstring"""
    UpperCamelCase__ = parameterized.to_safe_name("_".join(str(x ) for x in param.args ) )
return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
A : Tuple = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCAmelCase ( snake_case__ ):
'''simple docstring'''
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ )
def lowerCamelCase__ ( self :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] ) -> Dict:
"""simple docstring"""
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ )
def lowerCamelCase__ ( self :Optional[Any] , lowerCamelCase_ :Tuple , lowerCamelCase_ :List[Any] ) -> int:
"""simple docstring"""
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ )
def lowerCamelCase__ ( self :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :Optional[Any] ) -> Any:
"""simple docstring"""
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase_ , name_func=lowerCamelCase_ )
def lowerCamelCase__ ( self :Tuple , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] ) -> str:
"""simple docstring"""
self.run_and_check(
stage=lowerCamelCase_ , model=lowerCamelCase_ , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
def lowerCamelCase__ ( self :List[Any] , lowerCamelCase_ :Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCamelCase__ ( self :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :int = 1_0 , lowerCamelCase_ :bool = True , lowerCamelCase_ :bool = True , lowerCamelCase_ :bool = True , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ = models[model]
UpperCamelCase__ = self.run_trainer(
stage=lowerCamelCase_ , model_name=lowerCamelCase_ , eval_steps=lowerCamelCase_ , num_train_epochs=1 , distributed=lowerCamelCase_ , fpaa=lowerCamelCase_ , )
self.do_checks(lowerCamelCase_ )
return output_dir
def lowerCamelCase__ ( self :Dict , lowerCamelCase_ :str , lowerCamelCase_ :str , lowerCamelCase_ :int = 1_0 , lowerCamelCase_ :int = 1 , lowerCamelCase_ :bool = True , lowerCamelCase_ :bool = True , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ = self.get_auto_remove_tmp_dir("./xxx" , after=lowerCamelCase_ )
UpperCamelCase__ = f'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowerCamelCase_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
UpperCamelCase__ = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
UpperCamelCase__ = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
UpperCamelCase__ = self.get_launcher(lowerCamelCase_ )
UpperCamelCase__ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCamelCase_ , env=self.get_env() )
return output_dir
def lowerCamelCase__ ( self :List[str] , lowerCamelCase_ :Union[str, Any]=False ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ = min(2 , get_gpu_count() ) if distributed else 1
        return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 516 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Tuple = logging.get_logger(__name__)
def snake_case__ ( _snake_case : Union[str, Any] , _snake_case : Dict=False ):
"""simple docstring"""
UpperCamelCase__ = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
UpperCamelCase__ = "segformer.encoder." + key
if key.startswith("backbone" ):
UpperCamelCase__ = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCamelCase__ = key[key.find("patch_embed" ) + len("patch_embed" )]
UpperCamelCase__ = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(_snake_case )-1}' )
if "norm" in key:
UpperCamelCase__ = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCamelCase__ = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
UpperCamelCase__ = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(_snake_case )-1}' )
if "layer_norm1" in key:
UpperCamelCase__ = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
UpperCamelCase__ = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
UpperCamelCase__ = key[key.find("block" ) + len("block" )]
UpperCamelCase__ = key.replace(F'block{idx}' , F'block.{int(_snake_case )-1}' )
if "attn.q" in key:
UpperCamelCase__ = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
UpperCamelCase__ = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
UpperCamelCase__ = key.replace("attn" , "attention.self" )
if "fc1" in key:
UpperCamelCase__ = key.replace("fc1" , "dense1" )
if "fc2" in key:
UpperCamelCase__ = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
UpperCamelCase__ = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
UpperCamelCase__ = key.replace("linear_fuse.conv" , "linear_fuse" )
UpperCamelCase__ = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCamelCase__ = key[key.find("linear_c" ) + len("linear_c" )]
UpperCamelCase__ = key.replace(F'linear_c{idx}' , F'linear_c.{int(_snake_case )-1}' )
if key.startswith("head" ):
UpperCamelCase__ = key.replace("head" , "classifier" )
UpperCamelCase__ = value
return new_state_dict
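# Illustrative before/after pairs for the renaming above (keys shortened):
#
#   "backbone.patch_embed1.proj.weight" -> "segformer.encoder.patch_embeddings.0.proj.weight"
#   "backbone.block1.0.attn.q.weight"   -> "segformer.encoder.block.0.0.attention.self.query.weight"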
def snake_case__ ( _snake_case : str , _snake_case : List[Any] ):
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCamelCase__ = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' )
UpperCamelCase__ = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
UpperCamelCase__ = kv_weight[
: config.hidden_sizes[i], :
]
UpperCamelCase__ = kv_bias[: config.hidden_sizes[i]]
UpperCamelCase__ = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCamelCase__ = kv_bias[
config.hidden_sizes[i] :
]
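# Self-contained sketch of the key/value split above: a fused (2*H, H) "kv"
# weight is sliced into a key block (first H rows) and a value block (last H
# rows). H is illustrative only:
_demo_H = 4
_demo_kv = torch.arange(2 * _demo_H * _demo_H).reshape(2 * _demo_H, _demo_H)
_demo_k_w, _demo_v_w = _demo_kv[:_demo_H, :], _demo_kv[_demo_H:, :]
assert _demo_k_w.shape == (_demo_H, _demo_H) and _demo_v_w.shape == (_demo_H, _demo_H)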
def snake_case__ ( ):
"""simple docstring"""
UpperCamelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase__ = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights into our SegFormer structure, verify the outputs and save the result."""
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(f"Converting model {model_name}...")
    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]
    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
            [
                [
                    [-1.1372e01, -1.2787e01, -1.3477e01],
                    [-1.2536e01, -1.4194e01, -1.4409e01],
                    [-1.3217e01, -1.4888e01, -1.5327e01],
                ],
                [
                    [-1.4791e01, -1.7122e01, -1.8277e01],
                    [-1.7163e01, -1.9192e01, -1.9533e01],
                    [-1.7897e01, -1.9991e01, -2.0315e01],
                ],
                [
                    [7.6723e-01, 4.1921e-01, -7.7878e-02],
                    [4.7772e-01, 9.5557e-03, -2.8082e-01],
                    [3.6032e-01, -2.4826e-01, -5.1168e-01],
                ],
            ]
        )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
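# Example invocation (script and checkpoint file names are illustrative assumptions):
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-finetuned-ade-512-512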
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
A : Dict = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 516 | 1 |
"""simple docstring"""
import re
def _UpperCamelCase ( UpperCamelCase ) -> list:
"""simple docstring"""
return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]
def _UpperCamelCase ( UpperCamelCase ) -> str:
"""simple docstring"""
__UpperCAmelCase : Dict = split_input(str_ )
return "".join(
["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str:
"""simple docstring"""
try:
__UpperCAmelCase : Optional[int] = split_input(UpperCamelCase )
if upper:
__UpperCAmelCase : str = "".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
__UpperCAmelCase : List[str] = "".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def _UpperCamelCase ( UpperCamelCase ) -> str:
"""simple docstring"""
return to_simple_case(UpperCamelCase )
def _UpperCamelCase ( UpperCamelCase ) -> str:
"""simple docstring"""
try:
__UpperCAmelCase : Any = to_simple_case(UpperCamelCase )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str:
"""simple docstring"""
return to_complex_case(UpperCamelCase , UpperCamelCase , "_" )
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> str:
"""simple docstring"""
return to_complex_case(UpperCamelCase , UpperCamelCase , "-" )
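# Illustrative behaviour of the converters above (hand-checked examples, not an official test suite):
#   to_pascal_case("hello world")         -> "HelloWorld"
#   to_camel_case("hello world")          -> "helloWorld"
#   to_snake_case("hello world", False)   -> "hello_world"
#   to_kebab_case("hello world", True)    -> "HELLO-WORLD"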
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 711 |
"""simple docstring"""
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple:
"""simple docstring"""
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
__UpperCAmelCase : Optional[int] = mf_knapsack(i - 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase )
else:
__UpperCAmelCase : Any = max(
mf_knapsack(i - 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , mf_knapsack(i - 1 , UpperCamelCase , UpperCamelCase , j - wt[i - 1] ) + val[i - 1] , )
__UpperCAmelCase : str = val
return f[i][j]
def knapsack(w, wt, val, n):
    """Bottom-up 0-1 knapsack: return the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp  # after the loops w_ == w, so this is the optimal value dp[n][w]
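# Worked cell of the recurrence above (values taken from the demo in __main__ below):
# with wt = [4, 3, 2, 3] and val = [3, 2, 4, 4], dp[2][6] = max(val[1] + dp[1][3], dp[1][6])
# = max(2 + 0, 3) = 3, i.e. at capacity 6 the best choice among the first two items is item 1 alone.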
def knapsack_with_example_solution(w, wt, val):
    """
    Solve the knapsack problem and also reconstruct one optimal subset
    of (1-based) item indices.

    >>> knapsack_with_example_solution(10, [1, 3, 5, 2], [10, 20, 100, 22])
    (142, {2, 3, 4})
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    """Recursively walk the dp table backwards and collect one optimal subset of item indices."""
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i - 1, j);
    # i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
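# Backtracking illustration (using the demo values below): if dp[4][6] != dp[3][6], item 4 was used,
# so it is added to the set and the walk continues at (3, 6 - wt[3]); otherwise item 4 is skipped
# and the walk continues at (3, 6).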
if __name__ == "__main__":
A = [3, 2, 4, 4]
A = [4, 3, 2, 3]
A = 4
A = 6
A = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
A , A = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
A , A = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 487 | 0 |