'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
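# A minimal usage sketch (kept in comments, since this file is a package
# __init__): with the lazy structure above, BLIP classes resolve on first
# attribute access. The checkpoint id is an assumption for illustration:
#
#   from transformers import BlipProcessor, BlipForConditionalGeneration
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")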
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting each char that sits in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
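# For illustration, evaluate counts position-wise matches, e.g.:
#
#   >>> evaluate("Helxo Worlx", "Hello World")
#   ('Helxo Worlx', 9.0)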
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate new population members."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution loop until the target string is reproduced exactly."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" XLNet tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
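# A minimal usage sketch (not part of the class above); "xlnet-base-cased" is
# one of the hub ids listed in PRETRAINED_VOCAB_FILES_MAP:
#
#   tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   input_ids = tokenizer("Hello world")["input_ids"]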
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Compute the expected height and width when providing images to the image
        processor, assuming do_resize is set with a scalar shortest_edge size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
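# These tests follow the usual transformers layout and can be run with pytest;
# the file path below is an assumption:
#
#   pytest tests/models/deformable_detr/test_image_processing_deformable_detr.py -k "test_call_pil"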
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure.
    """
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ Both models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path to the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
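# Example invocation (the script filename matches the transformers conversion
# script; the paths are placeholders):
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch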
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes: return every prime below limit."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
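# A quick sanity check for prime_sieve (added for illustration):
#
#   >>> prime_sieve(10)
#   [2, 3, 5, 7]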
def solution(ceiling: int = 1_000_000) -> int:
    """
    Return the prime below the ceiling that can be written as the sum of the
    most consecutive primes (Project Euler problem 50).
    """
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f"{solution() = }")
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool(string: str) -> bool:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
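# Example invocation (script filename and paths are placeholders):
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny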
"""simple docstring"""
__lowercase = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
__lowercase = frozenset(["""prompt""", """negative_prompt"""])
__lowercase = frozenset([])
__lowercase = frozenset(["""image"""])
__lowercase = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
__lowercase = frozenset(["""image"""])
__lowercase = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
__lowercase = frozenset(["""prompt""", """image""", """negative_prompt"""])
__lowercase = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
__lowercase = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
__lowercase = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
__lowercase = frozenset(["""image""", """mask_image"""])
__lowercase = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
__lowercase = frozenset(["""example_image""", """image""", """mask_image"""])
__lowercase = frozenset(["""class_labels"""])
__lowercase = frozenset(["""class_labels"""])
__lowercase = frozenset(["""batch_size"""])
__lowercase = frozenset([])
__lowercase = frozenset(["""batch_size"""])
__lowercase = frozenset([])
__lowercase = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
__lowercase = frozenset(["""prompt""", """negative_prompt"""])
__lowercase = frozenset(["""input_tokens"""])
__lowercase = frozenset(["""input_tokens"""])
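# A minimal usage sketch (assuming a diffusers-style pipeline test mixin; the
# class and mixin names are illustrative):
#
#   class MyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
#       params = TEXT_TO_IMAGE_PARAMS
#       batch_params = TEXT_TO_IMAGE_BATCH_PARAMS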
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Generate the Fibonacci sequence: 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate the OR of the input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth first search to sort the graph's vertices by exit time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth first search to collect one strongly connected component on the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: return the list of strongly connected components of graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
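# Example run against the test graphs above; in test_graph_1 the cycle
# 0 -> 2 -> 1 -> 0 forms one component while 3 and 4 are singletons:
#
#   >>> strongly_connected_components(test_graph_1)
#   [[0, 1, 2], [3], [4]]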
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid
    mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is
    represented as a tuple of (variable-length) symbol strings.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
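
# Illustrative only (not part of the original file): a hand-traced BPE merge
# using a made-up merge table, showing how `bpe` collapses a token.
#   bpe_ranks = {("h", "e"): 0, ("he", "l"): 1}   # hypothetical ranks
#   "hell" -> ("h","e","l","l") -> ("he","l","l") -> ("hel","l") -> "hel l"
# Each pass merges the lowest-ranked adjacent pair until no known pair remains,
# and the resulting space-separated string is cached per input token.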
| 30 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Determine whether `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the `nth` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f"{solution() = }")
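    # Quick sanity checks (illustrative): the 6th prime is 13, and the first
    # prime the generator yields is 2.
    assert solution(6) == 13
    assert next(prime_generator()) == 2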
| 30 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
UpperCamelCase = """</w>"""
UpperCamelCase = """@@ """
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : List[Any] = set()
A_ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A_ : Optional[Any] = char
return pairs
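
# Example (illustrative): adjacent symbol pairs of a 4-symbol word.
#   get_pairs(("h", "e", "l", "l")) == {("h", "e"), ("e", "l"), ("l", "l")}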
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Constructs a Speech2Text2Tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
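
# Illustrative only: how the "@@ " continuation marker is undone when decoding.
# A sub-tokenized sequence such as ["hel@@", "lo", "wor@@", "ld"] is joined with
# spaces and then the "@@ " markers are collapsed:
#   " ".join(["hel@@", "lo", "wor@@", "ld"])    -> "hel@@ lo wor@@ ld"
#   "".join("hel@@ lo wor@@ ld".split("@@ "))   -> "hello world"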
| 65 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    r"""
    Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and
    the VE column of Table 1 from [1] for reference.

    [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
    https://arxiv.org/abs/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic
    differential equations." https://arxiv.org/abs/2011.13456
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator=None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
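
# Minimal usage sketch (illustrative; assumes trained VE weights are available,
# e.g. the hub id used in diffusers' examples):
#
#   from diffusers import UNet2DModel, KarrasVeScheduler
#   unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#   scheduler = KarrasVeScheduler()
#   pipeline = KarrasVePipeline(unet=unet, scheduler=scheduler)
#   image = pipeline(num_inference_steps=50).images[0]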
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
| 16 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 275 | 0 |
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz starting at `number`, for `iterations` rounds."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
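    # Example run: numbers 1..15, so the output string ends in "FizzBuzz ".
    print(fizz_buzz(1, 15))  # 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz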
| 346 |
from manim import *
class __SCREAMING_SNAKE_CASE (Scene ):
    """Checkpoint-loading visualization scene."""

    def construct( self : Dict ):
        mem = Rectangle(height=0.5 , width=0.5 )
        fill = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(__a , __a ).arrange(__a , buff=0 )
_a = Text("CPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("GPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Model" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
        cpu_targs = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
            cpu_target = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(YELLOW , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Loaded Checkpoint" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_a = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
        first_animations = []
        second_animations = []
for i, rect in enumerate(__a ):
            target = fill.copy().set_fill(BLUE , opacity=0.7 )
            target.move_to(rect )
            first_animations.append(GrowFromCenter(target , run_time=1 ) )
            cpu_target = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
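
# To render the animation (assuming the manim CLI is installed), a command along
# the lines of the following should work, where `this_file.py` is a placeholder
# for the actual module path:
#   manim -pql this_file.py __SCREAMING_SNAKE_CASE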
| 346 | 1 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
lowerCamelCase = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    param_name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, param_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
    return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check if we have `bnb.nn.Linear4bit` layers inside our model."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
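
# Minimal usage sketch (illustrative; `my_model` is a stand-in for any
# torch.nn.Module, and the config below uses accelerate's public API):
#
#   from accelerate.utils import BnbQuantizationConfig
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   quantized = load_and_quantize_model(my_model, bnb_config)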
| 131 |
from copy import deepcopy


class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
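    # Small demonstration of the public API (prefix sums are over arr[0:right]).
    f = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert f.prefix(3) == 1 + 2 + 3    # sum of arr[0:3]
    assert f.query(1, 4) == 2 + 3 + 4  # sum of arr[1:4]
    f.update(0, 10)                    # arr[0] = 10
    assert f.get(0) == 10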
| 131 | 1 |
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
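    # Small demonstration (illustrative): MST of a triangle graph with vertices
    # 1, 2, 3 (ids are 0-based, `connect` takes 1-based indices into `graph`).
    graph = [Vertex(i) for i in range(3)]
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 1, 3, 5)
    # The cheap edges 1-2 and 2-3 form the MST; prim reports (child, parent).
    assert sorted(prim(graph, graph[0])) == [(2, 1), (3, 2)]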
| 369 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class __lowerCAmelCase ( tf.keras.layers.Layer):
def __init__( self: List[str] , _lowerCAmelCase: int , _lowerCAmelCase: int = 3 , _lowerCAmelCase: int = 1 , _lowerCAmelCase: int = 1 , _lowerCAmelCase: Optional[str] = "relu" , **_lowerCAmelCase: Any , ):
super().__init__(**_lowerCAmelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase :List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase :Dict = tf.keras.layers.ConvaD(
filters=_lowerCAmelCase , kernel_size=_lowerCAmelCase , strides=_lowerCAmelCase , padding="VALID" , groups=_lowerCAmelCase , use_bias=_lowerCAmelCase , name="convolution" , )
lowercase :Tuple = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
lowercase :Optional[int] = ACTaFN[activation] if activation is not None else tf.identity
def SCREAMING_SNAKE_CASE ( self: List[str] , _lowerCAmelCase: Tuple ):
lowercase :Dict = self.convolution(self.padding(_lowerCAmelCase ) )
lowercase :Union[str, Any] = self.normalization(_lowerCAmelCase )
lowercase :Union[str, Any] = self.activation(_lowerCAmelCase )
return hidden_state
class __lowerCAmelCase ( tf.keras.layers.Layer):
def __init__( self: Dict , _lowerCAmelCase: RegNetConfig , **_lowerCAmelCase: Dict ):
super().__init__(**_lowerCAmelCase )
lowercase :Optional[Any] = config.num_channels
lowercase :str = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: Optional[int] ):
lowercase :List[Any] = shape_list(_lowerCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase :Tuple = tf.transpose(_lowerCAmelCase , perm=(0, 2, 3, 1) )
lowercase :List[Any] = self.embedder(_lowerCAmelCase )
return hidden_state
class __lowerCAmelCase ( tf.keras.layers.Layer):
def __init__( self: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: int = 2 , **_lowerCAmelCase: Union[str, Any] ):
super().__init__(**_lowerCAmelCase )
lowercase :Union[str, Any] = tf.keras.layers.ConvaD(
filters=_lowerCAmelCase , kernel_size=1 , strides=_lowerCAmelCase , use_bias=_lowerCAmelCase , name="convolution" )
lowercase :List[Any] = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" )
def SCREAMING_SNAKE_CASE ( self: Tuple , _lowerCAmelCase: tf.Tensor , _lowerCAmelCase: bool = False ):
return self.normalization(self.convolution(_lowerCAmelCase ) , training=_lowerCAmelCase )
class TFRegNetSELayer(tf.keras.layers.Layer):
    """
    Squeeze and Excitation layer (SE) proposed in [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507).
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state

class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state

class TFRegNetYLayer(tf.keras.layers.Layer):
    """
    RegNet's Y layer: an X layer with Squeeze and Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state

class TFRegNetStage(tf.keras.layers.Layer):
    """
    A RegNet stage composed by stacked layers.
    """

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state

class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)

@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )

class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}

_UpperCAmelCase : Optional[int] = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_UpperCAmelCase : Optional[Any] = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )

@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
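

# Illustrative usage sketch (not part of the original modeling file): image
# classification with a pretrained checkpoint. The checkpoint name matches
# _IMAGE_CLASS_CHECKPOINT above; the COCO image URL is only an example input.
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#     from PIL import Image
#     import requests
#     import tensorflow as tf
#
#     image = Image.open(
#         requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
#     )
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(image, return_tensors="tf")
#     logits = model(**inputs).logits
#     predicted_label = int(tf.math.argmax(logits, axis=-1))
#     print(model.config.id2label[predicted_label])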
| 158 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """
    :param ksize: the kernel size of the convolutional filter (ksize x ksize)
    :param sigma: standard deviation of the gaussian bell curve
    :param theta: orientation (in degrees) of the normal to the parallel stripes of the Gabor function
    :param lambd: wavelength of the sinusoidal component
    :param gamma: spatial aspect ratio, specifies the ellipticity of the support of the Gabor function
    :param psi: phase offset of the sinusoidal function
    """
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # fill each value of the kernel
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
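
    # For comparison (illustrative, not part of the original script): OpenCV ships a
    # built-in generator, cv2.getGaborKernel. Note that it takes the kernel size as a
    # (width, height) tuple and expects theta/psi in radians rather than degrees.
    from cv2 import getGaborKernel

    builtin_kernel = getGaborKernel((10, 10), 8, np.deg2rad(30), 10, 0, 0)
    print("built-in Gabor kernel shape:", builtin_kernel.shape)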
| 205 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
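

# Illustrative sketch (not part of this module): TorchFormatter is normally selected
# indirectly through `Dataset.with_format("torch")` / `Dataset.set_format("torch")`,
# after which indexing the dataset returns torch tensors.
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]}).with_format("torch")
#     ds[0]  # {'x': tensor([1., 2.]), 'y': tensor(0)}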
| 205 | 1 |
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        # earlier versions of set_timesteps() caused an error when indexing alphas with
        # a number of inference steps that is a power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
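

# Illustrative sketch (not part of the test suite): outside the test harness the
# scheduler is usually driven through `scheduler.step`, which dispatches to
# step_prk/step_plms internally. The shapes and the zero "noise prediction" below
# are placeholders, not values from the tests above.
#
#     scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02)
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8)
#     for t in scheduler.timesteps:
#         residual = torch.zeros_like(sample)  # stand-in for a UNet's noise prediction
#         sample = scheduler.step(residual, t, sample).prev_sample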
| 351 |
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 122 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."

@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
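

# Shape sketch (illustrative values, not from the original script): with two examples
# and num_choices = 4, the collator receives feature dicts whose values hold 4 tokenized
# sequences each, flattens them to 8 sequences for `tokenizer.pad`, then reshapes the
# padded tensors back to (batch_size, num_choices, seq_len) and re-attaches the labels:
#
#     collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
#     batch = collator(features)  # features: list of 2 dicts, 4 choices each
#     batch["input_ids"].shape    # -> torch.Size([2, 4, seq_len])
#     batch["labels"].shape       # -> torch.Size([2])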
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 343 |

import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 343 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
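

# Illustrative command line (hedged; the file names depend on the downloaded Studio
# Ousia checkpoint and the script name is assumed, matching the argparse flags below):
#
#     python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path mluke_base/pytorch_model.bin \
#         --metadata_path mluke_base/metadata.json \
#         --entity_vocab_path mluke_base/entity_vocab.jsonl \
#         --pytorch_dump_folder_path converted_mluke_base \
#         --model_size base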
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 267 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """simple docstring"""
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """simple docstring"""
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
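# A quick sanity sketch of the (source, destination) renaming applied below: pop each old
# key and reinsert its value under the new key. The state dict here is a toy example.
_demo_sd = {"ln_vision.weight": 1, "ln_vision.bias": 2}
rename_key(_demo_sd, "ln_vision.weight", "vision_model.post_layernorm.weight")
assert _demo_sd == {"ln_vision.bias": 2, "vision_model.post_layernorm.weight": 1}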
def read_in_q_v_bias(state_dict, config):
    """simple docstring"""
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
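# Why the zeros above: BLIP-2's vision tower has learnable q and v biases but no k bias,
# while the fused qkv projection expects a single bias of size 3 * hidden. A standalone
# sketch of that concatenation with toy tensors:
_q = torch.ones(4)
_v = torch.full((4,), 2.0)
_qkv = torch.cat((_q, torch.zeros_like(_v), _v))
assert _qkv.shape == (12,) and _qkv[4:8].sum() == 0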
def get_blip2_config(model_name, eos_token_id):
    """simple docstring"""
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    config = Blip2Config(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)
    hf_model = Blip2ForConditionalGeneration(config).eval()
    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")
    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 267 | 1 |
def solution(limit=1_000_000):
    """Sum Euler's totient over 2..limit, i.e. count the reduced proper fractions with denominator <= limit."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
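# A brute-force cross-check of the sieve above on a tiny limit, counting coprime
# numerators directly with gcd (only feasible for small n):
from math import gcd
def _phi_brute(n):
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)
assert solution(10) == sum(_phi_brute(n) for n in range(2, 11))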
if __name__ == "__main__":
print(F'{solution() = }')
| 101 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
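# The `class Bla:` wrapper above is a trick worth noting: black refuses to format a
# free-floating indented snippet, so the snippet is made the body of a dummy class,
# formatted, and the wrapper line is sliced off again. A dependency-free sketch of
# just that wrap/unwrap bookkeeping (without invoking black):
_snippet = "    def f():\n        return 1\n"
_wrapped = f"class Bla:\n{_snippet}"
assert _wrapped[len("class Bla:\n") :] == _snippet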
def is_copy_consistent(filename, overwrite=False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
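# How the `with X->Y` replace patterns above are parsed: after the leading `with` is
# dropped, each comma-separated chunk is matched against _re_replace_pattern to extract
# (old, new, option). A toy example of the groups it yields:
_m = _re_replace_pattern.search("Bert->GPT2 all-casing")
assert _m is not None and _m.groups()[:2] == ("Bert", "GPT2")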
def check_copies(overwrite=False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 114 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    """simple docstring"""
    def get_tokenizer(self, mname):
        """simple docstring"""
        return FSMTTokenizer.from_pretrained(mname)
    def get_model(self, mname):
        """simple docstring"""
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ] )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        """simple docstring"""
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
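# For reference, a corpus-level BLEU check equivalent in spirit to `calculate_bleu` above,
# written directly against sacrebleu (assumed installed; this is not the repo's utils
# helper, whose exact implementation may differ):
def _demo_bleu(hypotheses, references):
    import sacrebleu
    return round(sacrebleu.corpus_bleu(hypotheses, [references]).score, 4)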
| 315 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """simple docstring"""
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 315 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "convbert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    """simple docstring"""
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        ) | 210 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "data2vec-text"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    """simple docstring"""
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        ) | 210 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 359 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
    @property
    def num_attention_heads(self):
        return self.encoder_attention_heads
    @property
    def hidden_size(self):
        return self.d_model
| 173 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_0000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)
        def tokenize(examples):
            return tokenizer(examples["text"])
        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
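# `get_duration` comes from the local benchmark utils; a plausible stand-in (an assumption,
# not the actual implementation) is a decorator that runs the call and returns its
# wall-clock duration, which is why the `times[...] = map(...)` lines above store floats:
import functools
import time
def _demo_get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start
    return wrapper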
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 37 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "xlm-roberta-xl"
    def __init__(
        self,
        vocab_size=25_0880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=1_0240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    """simple docstring"""
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 37 | 1 |
'''simple docstring'''
def solution():
    """Count the Sundays that fell on the first of the month during the twentieth century (1901-2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1_901
    sundays = 0
    while year < 2_001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2_001 and day == 1:
            sundays += 1
    return sundays
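# A cross-check of the hand-rolled calendar walk above using the standard library
# (cheap enough to run at import time):
import datetime
assert solution() == sum(
    1
    for _y in range(1901, 2001)
    for _m in range(1, 13)
    if datetime.date(_y, _m, 1).weekday() == 6  # Sunday
)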
if __name__ == "__main__":
print(solution())
| 249 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
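# The _LazyModule assigned above replaces this module in sys.modules and imports the real
# submodules only on first attribute access. A minimal standalone illustration of that
# deferral, using importlib on a stdlib module (the names here are illustrative only):
def _demo_lazy(module_name, attr):
    import importlib
    return getattr(importlib.import_module(module_name), attr)
assert _demo_lazy("math", "pi") > 3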
| 249 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
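# The slicing above splits a fused qkv projection into equal thirds. A standalone sketch
# with a toy hidden size, checking that the three slices tile the fused matrix exactly:
_hidden = 4
_fused = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
_q, _k, _v = _fused[:_hidden, :], _fused[_hidden : _hidden * 2, :], _fused[-_hidden:, :]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _fused)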
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)
    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 256 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int], _snake_case : List[Any], _snake_case : str=7, _snake_case : Tuple=3, _snake_case : List[str]=3_0, _snake_case : Tuple=4_0_0, _snake_case : Any=True, _snake_case : List[Any]=None, _snake_case : int=0.9, _snake_case : Optional[Any]=None, _snake_case : str=True, _snake_case : Union[str, Any]=[0.5, 0.5, 0.5], _snake_case : Union[str, Any]=[0.5, 0.5, 0.5], ) ->List[Any]:
snake_case__ : int = size if size is not None else {'shortest_edge': 3_0}
snake_case__ : Tuple = crop_size if crop_size is not None else {'height': 3_0, 'width': 3_0}
snake_case__ : Union[str, Any] = parent
snake_case__ : Dict = batch_size
snake_case__ : int = num_channels
snake_case__ : Tuple = min_resolution
snake_case__ : Any = max_resolution
snake_case__ : List[Any] = do_resize_and_center_crop
snake_case__ : str = size
snake_case__ : str = crop_pct
snake_case__ : List[str] = crop_size
snake_case__ : Optional[int] = do_normalize
snake_case__ : Tuple = image_mean
snake_case__ : Tuple = image_std
def lowercase_ ( self : Optional[int] ) ->int:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 277 | 0 |
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int = None) -> str:
    """
    Simulates the BB84 quantum key distribution protocol and returns the
    agreed-upon key as a string of 0s and 1s.
    """
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
print(f"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
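# Usage sketch (assumes qiskit with the Aer provider is installed; a fixed seed
# makes both the basis choices and the simulator run reproducible):
#   key = bbaa(key_len=16, seed=42)
#   assert len(key) == 16 and set(key) <= {"0", "1"}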
| 44 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 44 | 1 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate inductive reactance, frequency or inductance from two given
    electrical properties and return the missing one in a name/value dict.
    Exactly one argument must be 0; it is treated as the unknown.

    >>> ind_reactance(inductance=0, frequency=10e3, reactance=50)
    {'inductance': 0.0007957747154594767}
    >>> ind_reactance(inductance=35e-3, frequency=0, reactance=50)
    {'frequency': 227.36420441699332}
    >>> ind_reactance(inductance=35e-6, frequency=1e3, reactance=0)
    {'reactance': 0.2199114857512855}
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
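# The three branches above are rearrangements of the inductive-reactance
# relation X_L = 2 * pi * f * L, solved for whichever argument was passed as 0.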
if __name__ == "__main__":
import doctest
doctest.testmod()
| 124 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
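# Usage sketch (hypothetical generator; mirrors how Dataset.from_generator uses
# this reader internally):
#
#   def gen():
#       yield {"text": "hello"}
#       yield {"text": "world"}
#
#   ds = GeneratorDatasetInputStream(generator=gen).read()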
| 38 | 0 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
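# Shape walk-through for torch_extract_patches (illustrative numbers): an input
# of shape (C, H, W) = (3, 32, 64) with 16x16 patches is unfolded into
# (1, H // 16, W // 16, 16 * 16 * C) = (1, 2, 4, 768).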
def render_text(text: str, text_size: int = 36, text_color: str = "black", background_color: str = "white", left_padding: int = 5, right_padding: int = 5, top_padding: int = 5, bottom_padding: int = 5, font_bytes: Optional[bytes] = None, font_path: Optional[str] = None) -> "Image.Image":
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(self, do_convert_rgb: bool = True, do_normalize: bool = True, patch_size: Dict[str, int] = None, max_patches: int = 2048, is_vqa: bool = False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the resized image fits within max_patches patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(self, image: np.ndarray, data_format=None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(self, images: ImageInput, header_text: Optional[str] = None, do_convert_rgb: bool = None, do_normalize: Optional[bool] = None, max_patches: Optional[int] = None, patch_size: Optional[Dict[str, int]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
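# Usage sketch (assumes Pillow and torch are available; numbers illustrative):
# each flattened patch carries 2 positional features (row id, col id) plus the
# 16 * 16 * 3 = 768 pixel values, so:
#   processor = Pix2StructImageProcessor(max_patches=512)
#   feats = processor.preprocess(Image.new("RGB", (320, 240)), return_tensors="np")
#   feats["flattened_patches"].shape  # -> (1, 512, 770)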
| 145 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 145 | 1 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=4_2 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=4_2 ),
'v2': DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 253 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechTaHifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechTaHifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[Any] = AudioLDMPipeline(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = audioldm_pipe(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(_SCREAMING_SNAKE_CASE ) == 256
SCREAMING_SNAKE_CASE_ : int = audio[:10]
SCREAMING_SNAKE_CASE_ : str = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : str = AudioLDMPipeline(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE_ : Union[str, Any] = audioldm_pipe(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = output.audios[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE_ : str = audioldm_pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ : List[str] = text_inputs['input_ids'].to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = audioldm_pipe.text_encoder(
_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ : List[str] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE_ : Tuple = F.normalize(_SCREAMING_SNAKE_CASE , dim=-1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = prompt_embeds
# forward
SCREAMING_SNAKE_CASE_ : Union[str, Any] = audioldm_pipe(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = AudioLDMPipeline(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Any = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE_ : str = negative_prompt
SCREAMING_SNAKE_CASE_ : List[Any] = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE_ : Tuple = audioldm_pipe(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = output.audios[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE_ : List[str] = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE_ : List[str] = audioldm_pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
SCREAMING_SNAKE_CASE_ : Optional[int] = text_inputs['input_ids'].to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = audioldm_pipe.text_encoder(
_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE_ : Any = F.normalize(_SCREAMING_SNAKE_CASE , dim=-1 )
embeds.append(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = embeds
# forward
SCREAMING_SNAKE_CASE_ : Tuple = audioldm_pipe(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = AudioLDMPipeline(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = 'egg cracking'
SCREAMING_SNAKE_CASE_ : str = audioldm_pipe(**_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(_SCREAMING_SNAKE_CASE ) == 256
SCREAMING_SNAKE_CASE_ : Optional[Any] = audio[:10]
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = AudioLDMPipeline(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE_ : List[str] = audioldm_pipe(_SCREAMING_SNAKE_CASE , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Dict = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : List[str] = audioldm_pipe(_SCREAMING_SNAKE_CASE , num_inference_steps=2 , num_waveforms_per_prompt=_SCREAMING_SNAKE_CASE ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE_ : str = 2
SCREAMING_SNAKE_CASE_ : Tuple = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_SCREAMING_SNAKE_CASE ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AudioLDMPipeline(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = audioldm_pipe(audio_length_in_s=0.016 , **_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = output.audios[0]
assert audio.ndim == 1
assert len(_SCREAMING_SNAKE_CASE ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE_ : Tuple = audioldm_pipe(audio_length_in_s=0.032 , **_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_SCREAMING_SNAKE_CASE ) / vocoder_sampling_rate == 0.032
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AudioLDMPipeline(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = ['hey']
SCREAMING_SNAKE_CASE_ : Dict = audioldm_pipe(_SCREAMING_SNAKE_CASE , num_inference_steps=1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output.audios.shape
assert audio_shape == (1, 256)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE_ : int = SpeechTaHifiGan(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = audioldm_pipe(_SCREAMING_SNAKE_CASE , num_inference_steps=1 )
SCREAMING_SNAKE_CASE_ : int = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def UpperCAmelCase ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE )
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="cpu" , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 8, 128, 16) )
SCREAMING_SNAKE_CASE_ : Tuple = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE_ : Optional[int] = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_inputs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = 25
SCREAMING_SNAKE_CASE_ : Union[str, Any] = audioldm_pipe(**_SCREAMING_SNAKE_CASE ).audios[0]
assert audio.ndim == 1
assert len(_SCREAMING_SNAKE_CASE ) == 8_1920
SCREAMING_SNAKE_CASE_ : Any = audio[7_7230:7_7240]
SCREAMING_SNAKE_CASE_ : List[Any] = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
SCREAMING_SNAKE_CASE_ : int = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE_ : Optional[int] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : List[str] = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_inputs(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = audioldm_pipe(**_SCREAMING_SNAKE_CASE ).audios[0]
assert audio.ndim == 1
assert len(_SCREAMING_SNAKE_CASE ) == 8_1920
SCREAMING_SNAKE_CASE_ : Union[str, Any] = audio[2_7780:2_7790]
SCREAMING_SNAKE_CASE_ : List[str] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
SCREAMING_SNAKE_CASE_ : str = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 253 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
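# Sketch of how the dummy past_key_values line up (values illustrative): with
# batch=2 and seqlen=8 under the GPT-J defaults (n_layer=28, n_head=16,
# n_embd=4096), each of the 28 (key, value) pairs has shape (2, 16, 10, 256),
# since past_key_values_length = seqlen + 2 and head_dim = 4096 // 16, and the
# attention mask is extended from length 8 to 8 + 10 = 18.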
| 319 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: sorts ``list_data`` in place and returns it."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
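# Example of the recursion above: each pass bubbles the current maximum to the
# end, then recurses on a prefix that is one element shorter:
#   bubble_sort([0, 5, 2, 3, 2])  # -> [0, 2, 2, 3, 5]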
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 1 |
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) through C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
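# Quick check of the recurrence: catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]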
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 292 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def A__ ( UpperCamelCase = "laptop" ):
A = F"https://www.amazon.in/laptop/s?k={product}"
A = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
A = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
A = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
A = item.ha.text
A = "https://www.amazon.in/" + item.ha.a["href"]
A = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
A = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
A = "Not available"
try:
A = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
A = ""
try:
A = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 100 )
except ValueError:
A = float("nan" )
except AttributeError:
pass
A = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A = " "
A = " "
data_frame.index += 1
return data_frame
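# Sanity check for the discount formula above: an MRP of ₹1,000 and a price of
# ₹750 give (1000 - 750) / 1000 * 100 = 25.0 percent.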
if __name__ == "__main__":
_snake_case : Optional[int] = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 292 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
lowerCAmelCase_ = {'''mobilebert-uncased''': 5_1_2}
lowerCAmelCase_ = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
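# Usage sketch for the token-type ids above (the ids are placeholders, not real
# vocab entries): a pair (token_ids_0=[7, 8], token_ids_1=[9]) is laid out as
# [CLS] 7 8 [SEP] 9 [SEP] and therefore maps to [0, 0, 0, 0, 1, 1].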
| 355 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 279 | 0 |
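# --- Added example (not part of the original file) ---
# The `_LazyModule` pattern above defers the heavy framework imports until an
# attribute is first accessed. A minimal sketch of the idea using only the
# standard library; names here are illustrative, not the transformers code.
import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve submodule attributes on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache for subsequent lookups
        return value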
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid : str = "isbn/0140328726") -> dict:
    """simple docstring"""
    new_olid = olid.strip().strip("""/""")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("""/""") != 1:
        msg = F'''{olid} is not a valid Open Library olid'''
        raise ValueError(msg)
    return requests.get(F'''https://openlibrary.org/{new_olid}.json''').json()
def summarize_book(ol_book_data : dict) -> dict:
    """simple docstring"""
    desired_keys = {
        """title""": """Title""",
        """publish_date""": """Publish date""",
        """authors""": """Authors""",
        """number_of_pages""": """Number of pages:""",
        """first_sentence""": """First sentence""",
        """isbn_10""": """ISBN (10)""",
        """isbn_13""": """ISBN (13)""",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["""Authors"""] = [
        get_openlibrary_data(author["""key"""])["""name"""] for author in data["""Authors"""]
    ]
    data["""First sentence"""] = data["""First sentence"""]["""value"""]
    for key, value in data.items():
        if isinstance(value , list):
            data[key] = """, """.join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowercase : Optional[Any] =input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
_lowercase : Any =summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print("\n".join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
| 170 |
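# --- Added example (not part of the original file) ---
# The length/isdigit gate above accepts any 10- or 13-digit string; a stricter
# check would also verify the checksum. A small illustrative helper for
# ISBN-13, whose check digit makes the 1-3-weighted digit sum divisible by 10.
def is_valid_isbn13(isbn: str) -> bool:
    """Return True when the 13-digit string passes the ISBN-13 checksum."""
    if len(isbn) != 13 or not isbn.isdigit():
        return False
    weighted = sum(int(d) * (1 if i % 2 == 0 else 3) for i, d in enumerate(isbn))
    return weighted % 10 == 0


assert is_valid_isbn13("9780140328721")  # ISBN-13 form of the default olid above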
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests (unittest.TestCase ):
    """simple docstring"""
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt( self ) -> Union[str, Any]:
        """simple docstring"""
        text_generator = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("""This is a test""" , do_sample=False )
        self.assertEqual(
            outputs , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
        outputs = text_generator(["""This is a test""", """This is a second test"""] )
        self.assertEqual(
            outputs , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
        outputs = text_generator("""This is a test""" , do_sample=True , num_return_sequences=2 , return_tensors=True )
        self.assertEqual(
            outputs , [
                {"""generated_token_ids""": ANY(list )},
                {"""generated_token_ids""": ANY(list )},
            ] , )
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = """<pad>"""
        outputs = text_generator(
            ["""This is a test""", """This is a second test"""] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
                [
                    {"""generated_token_ids""": ANY(list )},
                    {"""generated_token_ids""": ANY(list )},
                ],
                [
                    {"""generated_token_ids""": ANY(list )},
                    {"""generated_token_ids""": ANY(list )},
                ],
            ] , )
@require_tf
    def test_small_model_tf( self ) -> List[Any]:
        """simple docstring"""
        text_generator = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("""This is a test""" , do_sample=False )
        self.assertEqual(
            outputs , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
        outputs = text_generator(["""This is a test""", """This is a second test"""] , do_sample=False )
        self.assertEqual(
            outputs , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
    def get_test_pipeline( self , model , tokenizer , processor ) -> str:
        """simple docstring"""
        text_generator = TextGenerationPipeline(model=model , tokenizer=tokenizer )
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria( self ) -> List[str]:
        """simple docstring"""
        prompt = """Hello I believe in"""
        text_generator = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
        output = text_generator(prompt )
        self.assertEqual(
            output , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
        output = text_generator(prompt , stop_sequence=""" fe""" )
        self.assertEqual(output , [{"""generated_text""": """Hello I believe in fe"""}] )
    def run_pipeline_test( self , text_generator , _ ) -> List[Any]:
        """simple docstring"""
        model = text_generator.model
        tokenizer = text_generator.tokenizer
        outputs = text_generator("""This is a test""" )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
        outputs = text_generator("""This is a test""" , return_full_text=False )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
        text_generator = pipeline(task="""text-generation""" , model=model , tokenizer=tokenizer , return_full_text=False )
        outputs = text_generator("""This is a test""" )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
        outputs = text_generator("""This is a test""" , return_full_text=True )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
        outputs = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] , )
        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=True )
            self.assertEqual(
                outputs , [
                    [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                    [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                ] , )
        with self.assertRaises(ValueError ):
            outputs = text_generator("""test""" , return_full_text=True , return_text=True )
        with self.assertRaises(ValueError ):
            outputs = text_generator("""test""" , return_full_text=True , return_tensors=True )
        with self.assertRaises(ValueError ):
            outputs = text_generator("""test""" , return_text=True , return_tensors=True )
        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("""""" )
            self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                outputs = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 5_0_0 , max_new_tokens=2_0 )
            outputs = text_generator("""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=2_0 )
            # Hole strategy cannot work
            with self.assertRaises(ValueError ):
text_generator(
"""This is a test""" * 5_0_0 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate( self ) -> List[Any]:
        """simple docstring"""
        import torch
        # Classic `model_kwargs`
        pipe = pipeline(
            model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloat16} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        out = pipe("""This is a test""" )
        self.assertEqual(
            out , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloat16 )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        out = pipe("""This is a test""" )
        self.assertEqual(
            out , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
        out = pipe("""This is a test""" )
        self.assertEqual(
            out , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
    def test_small_model_fp16( self ) -> int:
        """simple docstring"""
        import torch
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.float16 )
        pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p( self ) -> str:
        """simple docstring"""
        import torch
        pipe = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.float16 )
        pipe("""This is a test""" , do_sample=True , top_p=0.5 )
    def test_pipeline_length_setting_warning( self ) -> List[Any]:
        """simple docstring"""
        prompt = """Hello world"""
        text_generator = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("""transformers.generation.tf_utils""" )
        else:
            logger = logging.get_logger("""transformers.generation.utils""" )
        logger_msg = """Both `max_new_tokens`"""  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=1_0 , max_new_tokens=1 )
        self.assertIn(logger_msg , cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_new_tokens=1 )
        self.assertNotIn(logger_msg , cl.out )
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=1_0 )
        self.assertNotIn(logger_msg , cl.out )
| 170 | 1 |
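# --- Added example (not part of the original file) ---
# The `ANY` helper imported from `test_pipelines_common` acts as a wildcard in
# the equality assertions above. A minimal sketch of how such a matcher can be
# implemented; the real helper may differ in detail.
class ANY:
    """Equality wildcard: compares equal to any object of the given types."""

    def __init__(self, *types):
        self._types = types

    def __eq__(self, other):
        return isinstance(other, self._types)

    def __repr__(self):
        return f"ANY({', '.join(t.__name__ for t in self._types)})"


assert {"generated_text": "hello"} == {"generated_text": ANY(str)}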
import math
import sys
def sum_of_squares(number :int) -> int:
    if number != int(number):
        raise ValueError('''the value of input must be a natural number''')
    if number < 0:
        raise ValueError('''the value of input must not be a negative number''')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1 , root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 369 |
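# --- Added example (not part of the original file) ---
# Quick sanity checks for the DP above; the expected values are easy to verify
# by hand (Lagrange's four-square theorem guarantees an answer of at most 4).
assert sum_of_squares(12) == 3  # 12 = 4 + 4 + 4
assert sum_of_squares(13) == 2  # 13 = 4 + 9
assert sum_of_squares(16) == 1  # 16 = 16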
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool( PipelineTool ):
    '''simple docstring'''
    default_checkpoint = '''openai/whisper-base'''
    description = (
        '''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
        '''transcribed text.'''
    )
    name = '''transcriber'''
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ['''audio''']
    outputs = ['''text''']
    def encode(self , audio ):
        return self.pre_processor(audio , return_tensors='''pt''' ).input_features
    def forward(self , inputs ):
        return self.model.generate(inputs=inputs )
    def decode(self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0] | 188 | 0 |
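# --- Added example (not part of the original file) ---
# A simplified sketch of the call chain a PipelineTool-style base class is
# expected to run; this is an assumption about the base class, not its code.
class MiniPipelineTool:
    def __call__(self, *args, **kwargs):
        encoded = self.encode(*args, **kwargs)   # raw input -> model features
        outputs = self.forward(encoded)          # features -> model outputs
        return self.decode(outputs)              # outputs -> user-facing result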
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main( ):
    """simple docstring"""
    parser = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 70 |
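# --- Added example (not part of the original file) ---
# Each `register_subcommand` call above attaches a subparser plus a factory
# that `args.func(args)` later resolves to. A minimal sketch of that pattern,
# with purely illustrative names.
from argparse import ArgumentParser


class HelloCommand:
    """Toy command showing the register_subcommand/func pattern used above."""

    @staticmethod
    def register_subcommand(commands_parser):
        sub = commands_parser.add_parser("hello", help="Print a greeting")
        sub.add_argument("--name", default="world")
        # `func` is what `args.func(args)` resolves to in main()
        sub.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name):
        self.name = name

    def run(self):
        print(f"Hello, {self.name}!")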
class Node :
    def __init__( self , val ):
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None
    def insert( self , val ):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val )
                else:
                    self.right.insert(val )
        else:
            self.val = val
def inorder( root , res ):
    """simple docstring"""
    if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def tree_sort( arr ):
    """simple docstring"""
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root , res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 302 | 0 |
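# --- Added example (not part of the original file) ---
# Tree sort inherits BST behaviour: average O(n log n), but an already-sorted
# input degenerates the tree into a linked list and the sort into O(n^2).
# A quick property check against the built-in sort:
import random

data = [random.randint(0, 100) for _ in range(50)]
assert tree_sort(data) == sorted(data)
# Worst case to keep in mind: tree_sort(list(range(n))) builds a right-spine
# of depth n, so insertion cost grows linearly per element.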
def solution( n : int = 1_0_0_0 ):
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 315 |
def solution( length : int = 5_0 ):
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 315 | 1 |
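# --- Added example (not part of the original file) ---
# The DP above counts row fillings with red blocks of length >= 3 separated by
# at least one grey square (this matches Project Euler problem 114). A
# memoized recursive cross-check of the same recurrence, added for
# illustration:
from functools import lru_cache


@lru_cache(maxsize=None)
def count_fillings(n: int) -> int:
    """A row is either all grey, or has g >= 0 leading grey squares, a red
    block of length b >= 3, and (if space remains) one separating grey square
    followed by any valid filling of the rest."""
    total = 1  # the all-grey row
    for g in range(n - 2):
        for b in range(3, n - g + 1):
            rest = n - g - b
            total += count_fillings(rest - 1) if rest > 0 else 1
    return total


assert count_fillings(7) == 17  # value quoted in the Project Euler 114 statement
for n in range(3, 12):
    assert count_fillings(n) == solution(n)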
"""simple docstring"""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
extra_arch = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = ''' Hello world! cécé herlolip'''
mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict ):
    '''simple docstring'''
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def load_xsum_checkpoint(checkpoint_path ):
    '''simple docstring'''
    sd = torch.load(checkpoint_path , map_location='''cpu''' )
    hub_interface = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval()
    hub_interface.model.load_state_dict(sd['''model'''] )
    return hub_interface
def make_linear_from_emb(emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path , pytorch_dump_folder_path , hf_checkpoint_name=None ):
    '''simple docstring'''
    if not os.path.exists(checkpoint_path ):
        bart = torch.hub.load('''pytorch/fairseq''' , checkpoint_path ).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace('''.''' , '''-''' )
    config = BartConfig.from_pretrained(hf_checkpoint_name )
    tokens = bart.encode(SAMPLE_TEXT ).unsqueeze(0 )
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name ).encode(SAMPLE_TEXT , return_tensors='''pt''' ).unsqueeze(0 )
    if not torch.eq(tokens , tokensa ).all():
        raise ValueError(
            F'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )
    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict['''model.shared.weight'''] = state_dict['''model.decoder.embed_tokens.weight''']
        for src, dest in mnli_rename_keys:
            rename_key(state_dict , src , dest )
        model = BartForSequenceClassification(config ).eval()
        model.load_state_dict(state_dict )
        fairseq_output = bart.predict('''mnli''' , tokens , return_logits=True )
        new_model_outputs = model(tokens )[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
        fairseq_output = bart.extract_features(tokens )
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config ).eval()
            model.load_state_dict(state_dict )
            new_model_outputs = model(tokens ).model[0]
        else:
            model = BartForConditionalGeneration(config ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict )
            if hasattr(model , '''lm_head''' ):
                model.lm_head = make_linear_from_emb(model.model.shared )
            new_model_outputs = model.model(tokens )[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            F'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 72 |
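# --- Added example (not part of the original file) ---
# `make_linear_from_emb` above realises weight tying: the LM head shares its
# matrix with the token embedding. A toy illustration with made-up shapes:
import torch
from torch import nn

emb = nn.Embedding(100, 16)            # toy vocab of 100, hidden size 16
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # share storage with the embedding

hidden = torch.randn(1, 16)
logits = lm_head(hidden)               # one logit per vocabulary entry
assert logits.shape == (1, 100)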
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig( PretrainedConfig):
    model_type = "unispeech"
    def __init__(self , vocab_size=3_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(1_0, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_2_8 , num_conv_pos_embedding_groups=1_6 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=1_0 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=1_0 , mask_feature_min_masks=0 , num_codevectors_per_group=3_2_0 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_0_0 , codevector_dim=2_5_6 , proj_codevector_dim=2_5_6 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_5_6 , num_ctc_classes=8_0 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
"""simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
@property
    def inputs_to_logits_ratio( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 72 | 1 |
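# --- Added example (not part of the original file) ---
# With the default strides above, inputs_to_logits_ratio is
# 5*2*2*2*2*2*2 = 320, so audio at the usual 16 kHz rate (an assumption) is
# downsampled to 50 feature frames per second:
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320

sampling_rate = 16_000            # assumed input rate
frames_per_second = sampling_rate / ratio
assert frames_per_second == 50.0  # one hidden state per 20 ms of audio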
'''simple docstring'''
def solution( max_base: int = 10, max_power: int = 22 ):
    bases = range(1, max_base )
    powers = range(1, max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'{solution(10, 22) = }') | 357 | '''simple docstring'''
def fizz_buzz( number: int, iterations: int ):
    if not isinstance(iterations, int ):
        raise ValueError("iterations must be defined as integers" )
    if not isinstance(number, int ) or not number >= 1:
        raise ValueError(
            "starting number must be\n and integer and be more than 0" )
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod() | 17 | 0 |
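# --- Added example (not part of the original file) ---
# The first fifteen values joined exactly as the function builds them; note
# the trailing space appended after every number.
expected = "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
assert fizz_buzz(1, 15) == expected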
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size_divisor=32 , do_rescale=True , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict( self ) -> dict:
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        self.image_processor_tester = GLPNImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> dict:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> None:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size_divisor''' ) )
        self.assertTrue(hasattr(image_processing , '''resample''' ) )
        self.assertTrue(hasattr(image_processing , '''do_rescale''' ) )
    def test_batch_feature( self ) -> None:
        pass
    def test_call_pil( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_numpy( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_pytorch( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 33 |
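# --- Added example (not part of the original file) ---
# The divisibility assertions above follow from GLPN's resizing rule: height
# and width are rounded down to the nearest multiple of `size_divisor`
# (floor behaviour assumed here). The arithmetic in isolation:
def round_down_to_multiple(value: int, size_divisor: int = 32) -> int:
    """Largest multiple of size_divisor that does not exceed value."""
    return (value // size_divisor) * size_divisor


assert round_down_to_multiple(480) == 480   # already a multiple of 32
assert round_down_to_multiple(500) == 480   # floored, never rounded up
assert round_down_to_multiple(500, 16) == 496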
import datasets
from .evaluate import evaluate
_CITATION = "\\n@article{hendrycks2021cuad,\n    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n    journal={arXiv preprint arXiv:2103.06268},\n    year={2021}\n}\n"
_DESCRIPTION = "\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_KWARGS_DESCRIPTION = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair as given in the references (see below)\n        - 'prediction_text': list of possible texts for the answer, as a list of strings\n            depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair (see above),\n        - 'answers': a Dict in the CUAD dataset format\n            {\n                'text': list of possible texts for the answer, as a list of strings\n                'answer_start': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    'exact_match': Exact match (the normalized answer exactly match the gold answer)\n    'f1': The F-score of predicted tokens versus the gold answer\n    'aupr': Area Under the Precision-Recall curve\n    'prec_at_80_recall': Precision at 80% recall\n    'prec_at_90_recall': Precision at 90% recall\nExamples:\n    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n    >>> cuad_metric = datasets.load_metric(\"cuad\")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class CUAD ( datasets.Metric):
    """simple docstring"""
    def _info ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute ( self , predictions , references ):
        '''simple docstring'''
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset , predictions=pred_dict )
return score
| 184 | 0 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester :
    """simple docstring"""
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ) -> None:
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ) -> None:
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
def prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ) -> None:
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config( self ) -> None:
        self.config_tester.run_common_tests()
    def test_use_cache_forward( self ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ) -> None:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                prepared_inputs_dict = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ) -> None:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@slow
    def test_pegasus_xsum_summary( self ) -> None:
        model = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
        tokenizer = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
        src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
        tgt_text = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
        inputs = tokenizer(src_text , return_tensors='''np''' , truncation=True , max_length=512 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
assert tgt_text == decoded | 240 |
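# --- Added example (not part of the original file) ---
# The JIT-enabled/disabled pattern used in test_encode/test_decode generalises
# to any pure function; the compiled and eager paths must agree. A minimal
# standalone sketch of the same equivalence check:
import jax
import jax.numpy as jnp


@jax.jit
def scaled_sum(x):
    return jnp.sum(x * 2.0)


x = jnp.arange(8.0)
with jax.disable_jit():
    eager = scaled_sum(x)   # runs op-by-op, easier to debug
compiled = scaled_sum(x)    # runs the XLA-compiled version

assert jnp.allclose(eager, compiled)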
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 5_1_2,
"bert-large-uncased": 5_1_2,
"bert-base-cased": 5_1_2,
"bert-large-cased": 5_1_2,
"bert-base-multilingual-uncased": 5_1_2,
"bert-base-multilingual-cased": 5_1_2,
"bert-base-chinese": 5_1_2,
"bert-base-german-cased": 5_1_2,
"bert-large-uncased-whole-word-masking": 5_1_2,
"bert-large-cased-whole-word-masking": 5_1_2,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-base-cased-finetuned-mrpc": 5_1_2,
"bert-base-german-dbmdz-cased": 5_1_2,
"bert-base-german-dbmdz-uncased": 5_1_2,
"TurkuNLP/bert-base-finnish-cased-v1": 5_1_2,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_1_2,
"wietsedv/bert-base-dutch-cased": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        # Keep the backend normalizer in sync with the requested options; rebuild it if they differ.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Single sequence: [CLS] A [SEP]; pair of sequences: [CLS] A [SEP] B [SEP]
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # Token type ids are 0 for the first sequence (and its special tokens), 1 for the second.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 240 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    # Return the static shape where known, falling back to dynamic dims where it is not.
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # Adding a tiny epsilon works around rare NaN issues in tf.nn.softmax on some backends.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF.
    # If end_dim or start_dim is negative, count them from the end.
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    # Expands 1-dimensional tensors to (batch, 1) so downstream Keras code sees 2D inputs.
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
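# A minimal usage sketch for the helpers above (values are illustrative only).
# `shape_list` mixes static and dynamic dims, which is useful inside tf.function
# where some dimensions are only known at runtime:
#
#   x = tf.zeros((2, 3))
#   shape_list(x)                    # [2, 3]
#   stable_softmax(tf.ones((1, 4)))  # ~[[0.25, 0.25, 0.25, 0.25]]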
| 178 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BigBird."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self , a , a="<unk>" , a="<s>" , a="</s>" , a="<pad>" , a="[SEP]" , a="[MASK]" , a="[CLS]" , a = None , **a , ) -> None:
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , pad_token=a , sep_token=a , mask_token=a , cls_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
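# A brief usage sketch for the tokenizer above (the checkpoint name is one of the
# public checkpoints this file references; output shown for illustration):
#
#   tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   ids = tok("Hello world")["input_ids"]       # [CLS] ... [SEP]-wrapped ids
#   tok.decode(ids, skip_special_tokens=True)   # "Hello world"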
| 178 | 1 |
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor (meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a metric length between SI prefixes of the meter."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
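# Examples (illustrative):
#   length_conversion(4, "meter", "kilometer")  # 0.004
#   length_conversion(1, "gigametre", "Mm")     # 1000.0  (symbols are accepted too)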
if __name__ == "__main__":
from doctest import testmod
testmod()
| 361 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1_000, 1_000) for i in range(10)]
    r = randint(-5_000, 5_000)
    return (arr, r)


dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    # Brute force: try every ordered triple via permutations, O(n^3).
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    # Sort once, then run the classic two-pointer scan per anchor element: O(n^2).
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
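# Quick sanity check of the two-pointer version (values chosen for illustration):
#   triplet_sum2([1, 2, 5, 8], 11)   # (1, 2, 8), since 1 + 2 + 8 == 11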
def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10_000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10_000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 290 | 0 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Tests if `x` is a torch/TF/jax tensor or a numpy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert TF/torch/jax tensors, numpy arrays and nested containers to plain Python objects."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert TF/torch/jax tensors, numpy arrays and nested containers to numpy arrays."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """Base class for all model outputs as dataclass, with a dict-like interface."""

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a given model class can return a loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the label argument names used by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d, parent_key="", delimiter="."):
    """Flatten a nested dict into a single-level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for numpy, torch, TF and jax tensors."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape`."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze`."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims`."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size`."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the repo_id information to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """Infers the framework of a given model class without using isinstance()."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
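# A quick usage sketch for the framework-agnostic helpers above (illustrative values):
#
#   x = np.arange(6).reshape(2, 3)
#   transpose(x).shape        # (3, 2)
#   reshape(x, (3, 2)).shape  # (3, 2)
#   tensor_size(x)            # 6
#
# The same calls accept torch/TF/jax tensors and dispatch on the detected type.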
| 98 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Add three fractions x_num/x_den + y_num/y_den + z_num/z_den in lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
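# Worked example: 1/2 + 1/3 + 1/6 reduces to 1/1.
#   add_three(1, 2, 1, 3, 1, 6)   # (1, 1)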
def solution(order: int = 35) -> int:
    """
    Sum, over all qualifying (x, y, z) fraction triples with denominators up to
    `order`, the reduced value of x + y + z, and return numerator + denominator
    of the total.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
| 27 | 0 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI workflow run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI workflow run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the parameter name in `get_ci_error_statistics.get_artifacts_links`
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the content of the requested artifacts of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
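# A usage sketch (the artifact name and env var below are placeholders, not real values):
#
#   reports = get_last_daily_ci_reports(
#       artifact_names=["run_all_tests_gpu_test_reports"],
#       output_dir="ci_artifacts",
#       token=os.environ.get("GITHUB_TOKEN"),
#   )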
| 369 |
"""simple docstring"""
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise ops: XOR is a carry-less add, AND<<1 is the carry."""
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
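# Worked trace (illustrative): add(3, 5)
#   3 ^ 5 = 6, carry (3 & 5) << 1 = 2
#   6 ^ 2 = 4, carry (6 & 2) << 1 = 4
#   4 ^ 4 = 0, carry (4 & 4) << 1 = 8  ->  0 ^ 8 = 8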
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Tuple = int(input("Enter the first number: ").strip())
A_ : Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"{add(first, second) = }")
| 316 | 0 |
"""simple docstring"""
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class A_ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =_str_to_version_tuple(self.version_str )
def __repr__( self :Optional[int] ):
"""simple docstring"""
return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""
@property
def UpperCAmelCase__ ( self :Optional[int] ):
"""simple docstring"""
return self.major, self.minor, self.patch
def UpperCAmelCase__ ( self :Optional[int] , lowerCamelCase_ :Tuple ):
"""simple docstring"""
if isinstance(A_ , A_ ):
return Version(A_ )
elif isinstance(A_ , A_ ):
return other
raise TypeError(f"""{other} (type {type(A_ )}) cannot be compared to version.""" )
def __eq__( self :List[Any] , lowerCamelCase_ :Dict ):
"""simple docstring"""
try:
lowerCamelCase__ : Union[str, Any] =self._validate_operand(A_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self :List[Any] , lowerCamelCase_ :Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : List[str] =self._validate_operand(A_ )
return self.tuple < other.tuple
def __hash__( self :Union[str, Any] ):
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def UpperCAmelCase__ ( cls :Any , lowerCamelCase_ :List[str] ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] ={f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def UpperCAmelCase__ ( self :Optional[int] ):
"""simple docstring"""
return self.version_str
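# Usage sketch: Version compares against strings and other Version instances.
#   Version("1.0.0") == "1.0.0"            # True
#   Version("1.2.0") < Version("2.0.0")    # True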
def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple of ints from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the version string from a (major, minor, patch) tuple of ints."""
    return ".".join(str(v) for v in version_tuple)
| 126 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Create a mapping function from each choice's string representation to the actual value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *, aliases: Union[str, List[str]] = None, help: str = None, default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING, metadata: dict = None, **kwargs,
) -> dataclasses.Field:
    """Argument helper enabling a concise syntax to create dataclass fields for `HfArgumentParser`."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
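# A usage sketch (the dataclass is hypothetical) for the helper above together with
# the HfArgumentParser class defined below:
#
#   @dataclasses.dataclass
#   class TrainArgs:
#       lr: float = HfArg(default=3e-4, help="Learning rate.")
#       fp16: bool = HfArg(default=False, aliases=["--mixed-precision"], help="Use fp16.")
#
#   (args,) = HfArgumentParser(TrainArgs).parse_args_into_dataclasses(["--lr", "1e-4"])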
class HfArgumentParser(ArgumentParser):
    """Subclass of `argparse.ArgumentParser` that uses type hints on dataclasses to generate arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
@staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
| 74 | 0 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()

        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()

        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
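# To run only this test class (the path follows the usual transformers test layout,
# shown here for illustration):
#   python -m pytest tests/models/bark/test_processor_bark.py -k BarkProcessorTest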
| 359 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
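

# Illustrative check (not part of the config module): with the defaults above,
# `hidden_size` and `num_attention_heads` resolve to the underlying `d_model`
# and `encoder_attention_heads` fields via the properties and `attribute_map`.
if __name__ == "__main__":
    config = PegasusConfig()
    assert config.hidden_size == config.d_model == 1024
    assert config.num_attention_heads == config.encoder_attention_heads == 16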
| 173 | 0 |
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
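

# Hedged smoke test: builds the classifier above from a default RegNetConfig and
# checks the logits shape on a random NCHW batch. The default config builds a
# full-size RegNet, so this is slow and purely illustrative.
if __name__ == "__main__":
    demo_config = RegNetConfig(num_labels=10)
    demo_model = TFRegNetForImageClassification(demo_config)
    demo_pixels = tf.random.uniform((1, demo_config.num_channels, 224, 224))
    demo_logits = demo_model(demo_pixels).logits
    print(demo_logits.shape)  # expected: (1, 10)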
| 30 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 17 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 365 |
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    """A single linear layer mapping pooled embeddings of size `embed_size` to `class_size` logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
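

# Minimal usage sketch: a random batch of 4 pooled embeddings of size 768 mapped
# to 5 classes; the sizes are arbitrary illustration values.
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    logits = head(torch.randn(4, 768))
    print(logits.shape)  # torch.Size([4, 5])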
| 175 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
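

# Illustrative check of the patch arithmetic implied by the defaults above: the
# AST embedder slides a 16x16 patch over a (num_mel_bins x max_length)
# spectrogram with the given strides. The formula mirrors the model code and is
# shown here only as a sanity check.
if __name__ == "__main__":
    config = ASTConfig()
    frequency_out = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1
    time_out = (config.max_length - config.patch_size) // config.time_stride + 1
    print(frequency_out, time_out, frequency_out * time_out)  # 12 101 1212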
| 66 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(reversed_block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample)
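

# Hedged initialization sketch: builds a heavily shrunken UNet and materializes
# its parameters through `init_weights` above. The reduced widths below are
# made-up values chosen only so the example runs quickly; real checkpoints use
# the class defaults.
if __name__ == "__main__":
    tiny_unet = FlaxUNet2DConditionModel(
        sample_size=8, block_out_channels=(32, 64, 64, 64), cross_attention_dim=64, attention_head_dim=4
    )
    tiny_params = tiny_unet.init_weights(jax.random.PRNGKey(0))
    print(jax.tree_util.tree_reduce(lambda acc, p: acc + p.size, tiny_params, 0), "parameters")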
| 324 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the random masking reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
    def test_numpy_arrays_inputs(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)
            self.assert_outputs_same(after_outputs, outputs)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make the random masking reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
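

# Hedged sketch of the noise convention used throughout these tests: ViTMAE
# argsorts per-patch noise and keeps the lowest (1 - mask_ratio) fraction of
# patches, so fixing the noise array makes the random masking reproducible.
# This helper is illustrative and not part of the test suite.
def visible_patch_indices(noise: np.ndarray, mask_ratio: float = 0.75) -> np.ndarray:
    num_patches = noise.shape[-1]
    len_keep = int(num_patches * (1 - mask_ratio))
    ids_shuffle = np.argsort(noise, axis=-1)  # ascending: smallest noise is kept
    return ids_shuffle[..., :len_keep]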
| 352 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step / max_step) after which the complex number
    constituted by this x-y-pair diverges. Members of the Mandelbrot set do not
    diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black-and-white color-coding: the Mandelbrot set is black, everything else is white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Color-coding that takes the relative distance into account; the Mandelbrot set is black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
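

# Illustrative sanity checks (based on the functions above): the origin never
# escapes, so its relative distance is 1.0; a far-away point escapes at step 0.
if __name__ == "__main__":
    assert get_distance(0, 0, 50) == 1.0
    assert get_distance(5, 5, 50) == 0.0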
| 21 | 0 |
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
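

# Example use: 4 is the inverse of 3 modulo 11 because 3 * 4 = 12, which is 1 (mod 11).
if __name__ == "__main__":
    inverse = find_mod_inverse(3, 11)
    assert inverse == 4 and (3 * inverse) % 11 == 1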
| 78 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
"""

_DESCRIPTION = """\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer
learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phase
using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).

See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""

_KWARGS_DESCRIPTION = """
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    'scores': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
"""

CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
def __magic_name__ ( self : Any ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
def __magic_name__ ( self : str , __lowercase : Any ) -> List[Any]:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
SCREAMING_SNAKE_CASE__ : Tuple ='''bleurt-base-128'''
if self.config_name.lower() in CHECKPOINT_URLS:
SCREAMING_SNAKE_CASE__ : Dict =self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
SCREAMING_SNAKE_CASE__ : Dict =self.config_name.upper()
else:
raise KeyError(
F"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}" )
# download the model checkpoint specified by self.config_name and set up the scorer
SCREAMING_SNAKE_CASE__ : List[str] =dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
SCREAMING_SNAKE_CASE__ : Any =score.BleurtScorer(os.path.join(__lowercase , __lowercase ) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Any , __lowercase : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ : int =self.scorer.score(references=__lowercase , candidates=__lowercase )
        return {"scores": scores}
| 152 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class a :
_lowerCAmelCase = None
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = True
_lowerCAmelCase = None
_lowerCAmelCase = 1
_lowerCAmelCase = None
_lowerCAmelCase = False
_lowerCAmelCase = None
_lowerCAmelCase = None
def __UpperCAmelCase ( self ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(__magic_name__ ) for k, v in self.__dict__.items()} )
| 350 |
'''simple docstring'''
from timeit import timeit
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
if number < 0:
raise ValueError('the value of input must not be negative' )
_a = 0
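    # Brian Kernighan's trick: ``number & (number - 1)`` clears the lowest set
    # bit, so this loop iterates once per set bit instead of once per bit.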
while number:
number &= number - 1
result += 1
return result
def _A (lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
if number < 0:
raise ValueError('the value of input must not be negative' )
_a = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def _A () -> None:
'''simple docstring'''
def do_benchmark(lowerCAmelCase__ :int ) -> None:
_a = 'import __main__ as z'
print(f'Benchmark when {number = }:' )
print(f'{get_set_bits_count_using_modulo_operator(lowerCAmelCase__ ) = }' )
_a = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=lowerCAmelCase__ )
print(f'timeit() runs in {timing} seconds' )
print(f'{get_set_bits_count_using_brian_kernighans_algorithm(lowerCAmelCase__ ) = }' )
_a = timeit(
'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=lowerCAmelCase__ , )
print(f'timeit() runs in {timing} seconds' )
for number in (25, 37, 58, 0):
do_benchmark(lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 104 | 0 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class __UpperCamelCase ( __UpperCAmelCase ):
def __a ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> str:
if tokenize_kwargs is None:
a : Union[str, Any] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" )
a : Any = truncation
a : Optional[Any] = tokenize_kwargs
a : Optional[Any] = {}
if return_tensors is not None:
a : str = return_tensors
return preprocess_params, {}, postprocess_params
def __a ( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
a : Tuple = self.framework
a : str = self.tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
return model_inputs
def __a ( self , lowerCAmelCase__ ) -> str:
a : str = self.model(**lowerCAmelCase_ )
return model_outputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Optional[int]:
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
return super().__call__(*lowerCAmelCase_ , **lowerCAmelCase_ )
| 105 |
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = "T5Config"
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
| 136 | 0 |
def __lowercase ( a__ = 1_00_00_00 ) -> int:
__SCREAMING_SNAKE_CASE = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , a__ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
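# The loop above is a totient sieve: phi[i] starts at i - 1, and whenever i is
# found to be prime (phi[i] still equals i - 1), every multiple j of i is
# reduced by phi[j] // i. The answer, sum(phi(d) for d = 2..limit), counts the
# reduced proper fractions with denominator <= limit; e.g. for limit = 8 it is
# 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21.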
if __name__ == "__main__":
print(solution())
| 118 |
def __lowercase ( a__ = 10_00 ) -> int:
__SCREAMING_SNAKE_CASE = -1
__SCREAMING_SNAKE_CASE = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
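        # Derivation: c = n - a - b, so a**2 + b**2 = (n - a - b)**2 expands to
        # 0 = n**2 - 2*a*n - 2*b*n + 2*a*b, giving b = (n**2 - 2*a*n) / (2*n - 2*a),
        # which the next line evaluates with integer division.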
__SCREAMING_SNAKE_CASE = (n * n - 2 * a * n) // (2 * n - 2 * a)
__SCREAMING_SNAKE_CASE = n - a - b
if c * c == (a * a + b * b):
__SCREAMING_SNAKE_CASE = a * b * c
if candidate >= product:
__SCREAMING_SNAKE_CASE = candidate
return product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 118 | 1 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
_a : List[str] = ''
_a : str = ''
_a : Optional[Any] = ''
_a : str = 1 # (0 is vertical, 1 is horizontal)
def SCREAMING_SNAKE_CASE ( ) -> None:
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_dataset(_lowerCamelCase ,_lowerCamelCase )
print("""Processing...""" )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = update_image_and_anno(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for index, image in enumerate(_lowerCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase : Optional[Any] = random_chars(32 )
_lowerCAmelCase : Dict = paths[index].split(os.sep )[-1].rsplit(""".""" ,1 )[0]
_lowerCAmelCase : Union[str, Any] = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cva.imwrite(f"/{file_root}.jpg" ,_lowerCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"Success {index+1}/{len(_lowerCamelCase )} with {file_name}" )
_lowerCAmelCase : str = []
for anno in new_annos[index]:
_lowerCAmelCase : Optional[Any] = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(_lowerCamelCase )
with open(f"/{file_root}.txt" ,"""w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> tuple[list, list]:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Tuple = []
for label_file in glob.glob(os.path.join(_lowerCamelCase ,"""*.txt""" ) ):
_lowerCAmelCase : Dict = label_file.split(os.sep )[-1].rsplit(""".""" ,1 )[0]
with open(_lowerCamelCase ) as in_file:
_lowerCAmelCase : List[Any] = in_file.readlines()
_lowerCAmelCase : int = os.path.join(_lowerCamelCase ,f"{label_name}.jpg" )
_lowerCAmelCase : Optional[Any] = []
for obj_list in obj_lists:
_lowerCAmelCase : Dict = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_lowerCamelCase )
labels.append(_lowerCamelCase )
return img_paths, labels
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ,_lowerCamelCase : list ,_lowerCamelCase : int = 1 ) -> tuple[list, list, list]:
_lowerCAmelCase : int = []
_lowerCAmelCase : str = []
_lowerCAmelCase : Optional[int] = []
for idx in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Any = img_list[idx]
path_list.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = anno_list[idx]
_lowerCAmelCase : int = cva.imread(_lowerCamelCase )
if flip_type == 1:
_lowerCAmelCase : int = cva.flip(_lowerCamelCase ,_lowerCamelCase )
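            # Label lines are YOLO-style [class, x_center, y_center, w, h] with
            # values normalized to [0, 1], so a horizontal flip only needs the
            # x centre mirrored: x -> 1 - x.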
for bbox in img_annos:
_lowerCAmelCase : List[Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_lowerCAmelCase : str = cva.flip(_lowerCamelCase ,_lowerCamelCase )
for bbox in img_annos:
_lowerCAmelCase : Union[str, Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_lowerCamelCase )
new_imgs_list.append(_lowerCamelCase )
return new_imgs_list, new_annos_lists, path_list
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 32 ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase : Optional[int] = ascii_lowercase + digits
return "".join(random.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 44 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Optional[int] = """ylacombe/bark-small"""
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : int = """en_speaker_1"""
_lowerCAmelCase : List[Any] = """This is a test string"""
_lowerCAmelCase : Any = """speaker_embeddings_path.json"""
_lowerCAmelCase : List[Any] = """speaker_embeddings"""
def __A ( self , **a__ ):
return AutoTokenizer.from_pretrained(self.checkpoint , **a__ )
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : int = BarkProcessor(tokenizer=a__ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : str = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __A ( self ):
_lowerCAmelCase : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase : Union[str, Any] = 35
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : Optional[int] = 8
_lowerCAmelCase : Dict = {
"""semantic_prompt""": np.ones(a__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase : Dict = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Tuple = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(a__ , **a__ )
_lowerCAmelCase : List[Any] = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase : str = processor(text=self.input_string , voice_preset=self.voice_preset )
def __A ( self ):
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : List[Any] = BarkProcessor(tokenizer=a__ )
_lowerCAmelCase : Dict = processor(text=self.input_string )
_lowerCAmelCase : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=a__ , return_attention_mask=a__ , return_token_type_ids=a__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 44 | 1 |
'''simple docstring'''
def __lowerCamelCase ( _lowercase ) -> List[Any]:
UpperCAmelCase : Tuple = len(_lowercase )
for i in range(length - 1 ):
UpperCAmelCase : Tuple = i
for k in range(i + 1 , _lowercase ):
if collection[k] < collection[least]:
UpperCAmelCase : Any = k
if least != i:
            collection[least] , collection[i] = (collection[i], collection[least])
return collection
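# Usage sketch (assuming the sort above is bound as `selection_sort`, which is
# the name the demo below expects): selection_sort([3, 1, 2]) -> [1, 2, 3].
# Cost: O(n^2) comparisons but at most n - 1 swaps.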
if __name__ == "__main__":
a : Tuple = input("""Enter numbers separated by a comma:\n""").strip()
a : Optional[Any] = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
| 338 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A , A , A = None , A = None , A = False , **A , ) -> Tuple:
super().__init__(features=A , cache_dir=A , keep_in_memory=A , **A )
UpperCAmelCase : Any = Sql(
cache_dir=A , features=A , sql=A , con=A , **A , )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Any = None
UpperCAmelCase : Any = None
UpperCAmelCase : int = None
UpperCAmelCase : int = None
self.builder.download_and_prepare(
download_config=A , download_mode=A , verification_mode=A , base_path=A , )
# Build dataset for splits
UpperCAmelCase : str = self.builder.as_dataset(
split="""train""" , verification_mode=A , in_memory=self.keep_in_memory )
return dataset
class UpperCamelCase_ :
def __init__( self , A , A , A , A = None , A = None , **A , ) -> str:
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
UpperCAmelCase : Dict = dataset
UpperCAmelCase : List[Any] = name
UpperCAmelCase : Any = con
UpperCAmelCase : Optional[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCAmelCase : Optional[Any] = num_proc
UpperCAmelCase : str = to_sql_kwargs
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.to_sql_kwargs.pop("""sql""" , A )
UpperCAmelCase : str = self.to_sql_kwargs.pop("""con""" , A )
UpperCAmelCase : Union[str, Any] = self.to_sql_kwargs.pop("""index""" , A )
UpperCAmelCase : str = self._write(index=A , **self.to_sql_kwargs )
return written
def _lowercase( self , A ) -> Any:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = args
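        # The first batch honours the caller's `if_exists` setting (it may need
        # to create or replace the table); every later batch must append to it.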
UpperCAmelCase : Union[str, Any] = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
UpperCAmelCase : int = query_table(
table=self.dataset.data , key=slice(A , offset + self.batch_size ) , indices=self.dataset._indices , )
UpperCAmelCase : Any = batch.to_pandas()
UpperCAmelCase : List[Any] = df.to_sql(self.name , self.con , index=A , **A )
return num_rows or len(A )
def _lowercase( self , A , **A ) -> int:
UpperCAmelCase : Optional[int] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
UpperCAmelCase , UpperCAmelCase : List[str] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , A , A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
written += num_rows
return written
| 338 | 1 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCamelCase__ = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ):
return [
int(10_00 * (box[0] / width) ),
int(10_00 * (box[1] / height) ),
int(10_00 * (box[2] / width) ),
int(10_00 * (box[3] / height) ),
]
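# The helper above maps pixel boxes (x0, y0, x1, y1) onto the 0-1000 grid that
# LayoutLM-style models expect, making boxes independent of image resolution.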
def _a ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Optional[str] , SCREAMING_SNAKE_CASE_ : Optional[str] ):
__lowerCAmelCase = to_pil_image(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase , __lowerCAmelCase = pil_image.size
__lowerCAmelCase = pytesseract.image_to_data(SCREAMING_SNAKE_CASE_ , lang=SCREAMING_SNAKE_CASE_ , output_type="dict" , config=SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
__lowerCAmelCase = [idx for idx, word in enumerate(SCREAMING_SNAKE_CASE_ ) if not word.strip()]
__lowerCAmelCase = [word for idx, word in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices]
__lowerCAmelCase = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices]
__lowerCAmelCase = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices]
__lowerCAmelCase = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices]
__lowerCAmelCase = [coord for idx, coord in enumerate(SCREAMING_SNAKE_CASE_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__lowerCAmelCase = []
for x, y, w, h in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__lowerCAmelCase = [x, y, x + w, y + h]
actual_boxes.append(SCREAMING_SNAKE_CASE_ )
# finally, normalize the bounding boxes
__lowerCAmelCase = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class a__ ( snake_case__ ):
_a : Dict = ["""pixel_values"""]
def __init__( self , _A = True , _A = None , _A = PILImageResampling.BILINEAR , _A = True , _A = 1 / 2_5_5 , _A = True , _A = None , _A = None , _A = True , _A = None , _A = "" , **_A , ):
"""simple docstring"""
super().__init__(**_A )
__lowerCAmelCase = size if size is not None else {"height": 2_2_4, "width": 2_2_4}
__lowerCAmelCase = get_size_dict(_A )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_value
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
__lowerCAmelCase = apply_ocr
__lowerCAmelCase = ocr_lang
__lowerCAmelCase = tesseract_config
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A = PILImageResampling.BILINEAR , _A = None , **_A , ):
"""simple docstring"""
__lowerCAmelCase = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__lowerCAmelCase = (size["height"], size["width"])
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A = None , **_A , ):
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A = None , **_A , ):
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A = None , _A = None , _A=None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , _A = ChannelDimension.FIRST , **_A , ):
"""simple docstring"""
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(_A )
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
__lowerCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
__lowerCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
__lowerCAmelCase = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("If do_normalize is True, image_mean and image_std must be specified." )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(_A ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , "pytesseract" )
__lowerCAmelCase = []
__lowerCAmelCase = []
for image in images:
__lowerCAmelCase , __lowerCAmelCase = apply_tesseract(_A , _A , _A )
words_batch.append(_A )
boxes_batch.append(_A )
if do_resize:
__lowerCAmelCase = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(_A , _A ) for image in images]
__lowerCAmelCase = BatchFeature(data={"pixel_values": images} , tensor_type=_A )
if apply_ocr:
__lowerCAmelCase = words_batch
__lowerCAmelCase = boxes_batch
return data
| 92 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
__lowerCAmelCase = TypeVar('T')
__lowerCAmelCase = Union[List[T], Tuple[T, ...]]
__lowerCAmelCase = Union[T, List[T], Dict[str, T]]
__lowerCAmelCase = Union[str, bytes, os.PathLike]
| 341 | 0 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__SCREAMING_SNAKE_CASE =importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__SCREAMING_SNAKE_CASE =[
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
if "://" in dataset_path:
lowercase_ : List[Any] = dataset_path.split('://' )[1]
return dataset_path
def lowercase__( __SCREAMING_SNAKE_CASE : fsspec.AbstractFileSystem ):
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__( __SCREAMING_SNAKE_CASE : fsspec.AbstractFileSystem , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Optional[int] = not is_remote_filesystem(__SCREAMING_SNAKE_CASE )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__SCREAMING_SNAKE_CASE ) , fs._strip_protocol(__SCREAMING_SNAKE_CASE ) )
else:
fs.mv(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , recursive=__SCREAMING_SNAKE_CASE )
def lowercase__( ):
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
lowercase_ : str = None
lowercase_ : Dict = None
lowercase_ : Optional[Any] = threading.Lock()
| 362 |
"""simple docstring"""
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : List[Any] = name
lowercase_ : int = val
def __str__( self ) -> Tuple:
'''simple docstring'''
return f'''{self.__class__.__name__}({self.name}, {self.val})'''
def __lt__( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return self.val < other.val
class UpperCamelCase :
def __init__( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[int] = {}
lowercase_ : Tuple = {}
lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase )
def __getitem__( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
return self.get_value(__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
return (idx - 1) // 2
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
return idx * 2 + 1
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
return idx * 2 + 2
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
return self.heap_dict[key]
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]:
'''simple docstring'''
lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1
lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase )
for idx, i in enumerate(__UpperCamelCase ):
lowercase_ : Any = idx
lowercase_ : str = i.val
for i in range(__UpperCamelCase ,-1 ,-1 ):
self.sift_down(__UpperCamelCase ,__UpperCamelCase )
return array
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
'''simple docstring'''
while True:
lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741
lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase )
lowercase_ : List[str] = idx
if l < len(__UpperCamelCase ) and array[l] < array[idx]:
lowercase_ : List[str] = l
if r < len(__UpperCamelCase ) and array[r] < array[smallest]:
lowercase_ : Dict = r
if smallest != idx:
                array[smallest] , array[idx] = array[idx], array[smallest]
                (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                ) = (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                )
lowercase_ : Any = smallest
else:
break
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int:
'''simple docstring'''
lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase )
while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[idx] , self.heap[p] = self.heap[p], self.heap[idx]
            self.idx_of_element[self.heap[idx]] , self.idx_of_element[self.heap[p]] = (
                self.idx_of_element[self.heap[p]],
                self.idx_of_element[self.heap[idx]],
            )
lowercase_ : int = p
lowercase_ : str = self.get_parent_idx(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
return self.heap[0]
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
        self.heap[0] , self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]] , self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
lowercase_ : Tuple = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 ,self.heap )
return x
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict:
'''simple docstring'''
self.heap.append(__UpperCamelCase )
lowercase_ : Tuple = len(self.heap ) - 1
lowercase_ : Optional[int] = node.val
self.sift_up(len(self.heap ) - 1 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.heap ) == 0
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
lowercase_ : Any = new_value
lowercase_ : List[str] = new_value
self.sift_up(self.idx_of_element[node] )
__SCREAMING_SNAKE_CASE =Node("R", -1)
__SCREAMING_SNAKE_CASE =Node("B", 6)
__SCREAMING_SNAKE_CASE =Node("A", 3)
__SCREAMING_SNAKE_CASE =Node("X", 1)
__SCREAMING_SNAKE_CASE =Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 321 | 0 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = []
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : str = []
for rt in rc.restypes:
_SCREAMING_SNAKE_CASE : Optional[int] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
_SCREAMING_SNAKE_CASE : Optional[Any] = {name: i for i, name in enumerate(SCREAMING_SNAKE_CASE__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
_SCREAMING_SNAKE_CASE : Dict = torch.tensor(
SCREAMING_SNAKE_CASE__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
SCREAMING_SNAKE_CASE__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
_SCREAMING_SNAKE_CASE : str = torch.tensor(
SCREAMING_SNAKE_CASE__ , dtype=torch.floataa , device=protein["""aatype"""].device , )
_SCREAMING_SNAKE_CASE : int = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
_SCREAMING_SNAKE_CASE : List[str] = restype_atomaa_to_atomaa[protein_aatype]
_SCREAMING_SNAKE_CASE : str = restype_atomaa_mask[protein_aatype]
_SCREAMING_SNAKE_CASE : List[Any] = residx_atomaa_mask
_SCREAMING_SNAKE_CASE : List[str] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
_SCREAMING_SNAKE_CASE : int = restype_atomaa_to_atomaa[protein_aatype]
_SCREAMING_SNAKE_CASE : Dict = residx_atomaa_to_atomaa.long()
# create the corresponding mask
_SCREAMING_SNAKE_CASE : str = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
_SCREAMING_SNAKE_CASE : int = rc.restype_atoa[restype_letter]
_SCREAMING_SNAKE_CASE : Dict = rc.residue_atoms[restype_name]
for atom_name in atom_names:
_SCREAMING_SNAKE_CASE : List[Any] = rc.atom_order[atom_name]
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1
_SCREAMING_SNAKE_CASE : int = restype_atomaa_mask[protein_aatype]
_SCREAMING_SNAKE_CASE : int = residx_atomaa_mask
return protein
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = tree_map(lambda SCREAMING_SNAKE_CASE__ : torch.tensor(SCREAMING_SNAKE_CASE__ , device=batch["""aatype"""].device ) , SCREAMING_SNAKE_CASE__ , np.ndarray )
_SCREAMING_SNAKE_CASE : Optional[Any] = tensor_tree_map(lambda SCREAMING_SNAKE_CASE__ : np.array(SCREAMING_SNAKE_CASE__ ) , make_atomaa_masks(SCREAMING_SNAKE_CASE__ ) )
return out
| 200 |
'''simple docstring'''
class lowercase__ :
'''simple docstring'''
def __init__( self , __snake_case = "" , __snake_case = False ):
# Mapping from the first character of the prefix of the node
_SCREAMING_SNAKE_CASE : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
_SCREAMING_SNAKE_CASE : List[Any] = is_leaf
_SCREAMING_SNAKE_CASE : Optional[Any] = prefix
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : Optional[int] = 0
for q, w in zip(self.prefix , __snake_case ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCAmelCase_ ( self , __snake_case ):
for word in words:
self.insert(__snake_case )
def UpperCAmelCase_ ( self , __snake_case ):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
_SCREAMING_SNAKE_CASE : List[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
_SCREAMING_SNAKE_CASE : List[str] = RadixNode(prefix=__snake_case , is_leaf=__snake_case )
else:
_SCREAMING_SNAKE_CASE : int = self.nodes[word[0]]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = incoming_node.match(
__snake_case )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(__snake_case )
            # Case 4: The word is longer than or equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = remaining_prefix
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.nodes[matching_string[0]]
_SCREAMING_SNAKE_CASE : List[Any] = RadixNode(__snake_case , __snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = aux_node
if remaining_word == "":
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
else:
self.nodes[matching_string[0]].insert(__snake_case )
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.nodes.get(word[0] , __snake_case )
if not incoming_node:
return False
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = incoming_node.match(
__snake_case )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(__snake_case )
def UpperCAmelCase_ ( self , __snake_case ):
_SCREAMING_SNAKE_CASE : Tuple = self.nodes.get(word[0] , __snake_case )
if not incoming_node:
return False
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = incoming_node.match(
__snake_case )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(__snake_case )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
_SCREAMING_SNAKE_CASE : Optional[Any] = list(self.nodes.values() )[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = merging_node.is_leaf
self.prefix += merging_node.prefix
_SCREAMING_SNAKE_CASE : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
_SCREAMING_SNAKE_CASE : List[str] = False
# If there is 1 edge, we merge it with its child
else:
_SCREAMING_SNAKE_CASE : int = list(incoming_node.nodes.values() )[0]
_SCREAMING_SNAKE_CASE : Tuple = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
_SCREAMING_SNAKE_CASE : str = merging_node.nodes
return True
def UpperCAmelCase_ ( self , __snake_case = 0 ):
if self.prefix != "":
print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = """banana bananas bandana band apple all beast""".split()
_SCREAMING_SNAKE_CASE : Optional[Any] = RadixNode()
root.insert_many(SCREAMING_SNAKE_CASE__ )
assert all(root.find(SCREAMING_SNAKE_CASE__ ) for word in words )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def snake_case_ ( ):
"""simple docstring"""
assert test_trie()
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = RadixNode()
_SCREAMING_SNAKE_CASE : Optional[int] = """banana bananas bandanas bandana band apple all beast""".split()
root.insert_many(SCREAMING_SNAKE_CASE__ )
print("""Words:""" , SCREAMING_SNAKE_CASE__ )
print("""Tree:""" )
root.print_tree()
if __name__ == "__main__":
main()
| 200 | 1 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
UpperCAmelCase_ : Dict = ksize + 1
UpperCAmelCase_ : Tuple = np.zeros((ksize, ksize), dtype=np.floataa )
# each value
for y in range(__lowerCamelCase ):
for x in range(__lowerCamelCase ):
# distance from center
UpperCAmelCase_ : Any = x - ksize // 2
UpperCAmelCase_ : List[str] = y - ksize // 2
# degree to radiant
UpperCAmelCase_ : Union[str, Any] = theta / 180 * np.pi
UpperCAmelCase_ : Dict = np.cos(_theta )
UpperCAmelCase_ : List[str] = np.sin(_theta )
# get kernel x
UpperCAmelCase_ : Optional[Any] = cos_theta * px + sin_theta * py
# get kernel y
UpperCAmelCase_ : Union[str, Any] = -sin_theta * px + cos_theta * py
# fill kernel
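            # 2-D Gabor function:
            #     g(x', y') = exp(-(x'**2 + gamma**2 * y'**2) / (2 * sigma**2))
            #                 * cos(2 * pi * x' / lambd + psi)
            # with (x', y') the rotated coordinates computed just above.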
UpperCAmelCase_ : str = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_a = imread('../image_data/lena.jpg')
# turn image in gray scale value
_a = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_a = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_a = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_a = out / out.max() * 255
_a = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 23 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_a = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
_a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
_a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def __a ( __lowerCamelCase ):
return x[0]
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = get_letter_count(__lowerCamelCase )
UpperCAmelCase_ : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(__lowerCamelCase )
UpperCAmelCase_ : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find, reverse=__lowerCamelCase )
UpperCAmelCase_ : Any = "".join(freq_to_letter[freq] )
UpperCAmelCase_ : str = list(freq_to_letter_str.items() )
freq_pairs.sort(key=__lowerCamelCase, reverse=__lowerCamelCase )
UpperCAmelCase_ : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(__lowerCamelCase )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = get_frequency_order(__lowerCamelCase )
UpperCAmelCase_ : int = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
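# Usage sketch: get_frequency_order(message) returns the 26 letters sorted
# from most to least frequent in `message`, and the match score above counts
# how many of ETAOIN's six most/least common letters land in the corresponding
# ends of that ordering, giving a 0-12 "looks like English" score.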
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 1 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
SCREAMING_SNAKE_CASE__ = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
if got_ver is None or want_ver is None:
raise ValueError(
F"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
F""" reinstalling {pkg}.""" )
if not ops[op](version.parse(SCREAMING_SNAKE_CASE ) , version.parse(SCREAMING_SNAKE_CASE ) ):
raise ImportError(
F"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ) -> None:
__lowercase = F"""\n{hint}""" if hint is not None else ''
# non-versioned check
if re.match(R'^[\w_\-\d]+$' , SCREAMING_SNAKE_CASE ):
__lowercase , __lowercase , __lowercase = requirement, None, None
else:
__lowercase = re.findall(R'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , SCREAMING_SNAKE_CASE )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
F""" got {requirement}""" )
__lowercase , __lowercase = match[0]
__lowercase = want_full.split(',' ) # there could be multiple requirements
__lowercase = {}
for w in want_range:
__lowercase = re.findall(R'^([\s!=<>]{1,2})(.+)' , SCREAMING_SNAKE_CASE )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
F""" but got {requirement}""" )
__lowercase , __lowercase = match[0]
__lowercase = want_ver
if op not in ops:
raise ValueError(F"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
__lowercase = '.'.join([str(SCREAMING_SNAKE_CASE ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return
# check if any version is installed
try:
__lowercase = importlib.metadata.version(SCREAMING_SNAKE_CASE )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> Tuple:
__lowercase = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
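# Usage sketch for the checker above (the original binding `require_version`
# is assumed here):
#     require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")
# parses the pip-style specifier, looks the package up via importlib.metadata,
# and raises ImportError when the installed version violates any clause.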
| 325 |
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE__ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None:
return None
__lowercase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowercase = f.readlines()
__lowercase = 0
while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
__lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ):
__lowercase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0]
__lowercase = re.findall('\[([^\]]+)\]' , SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
__lowercase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
__lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
__lowercase = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
__lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase = []
while (
line_index < len(SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ) -> int:
def find_duplicates(SCREAMING_SNAKE_CASE : Tuple ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowercase = []
for key in import_dict_objects.keys():
__lowercase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
__lowercase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowercase = 'base imports' if key == 'none' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__lowercase = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' )
__lowercase = parse_init(SCREAMING_SNAKE_CASE )
if objects is not None:
__lowercase = analyze_results(*SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('\n'.join(SCREAMING_SNAKE_CASE ) )
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('\n\n'.join(SCREAMING_SNAKE_CASE ) )
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
__lowercase = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / folder).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace(os.path.sep , '.' )
submodules.append(SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / fname).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE )
return submodules
SCREAMING_SNAKE_CASE__ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = importlib.util.spec_from_file_location(
'transformers' , os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
__lowercase = spec.loader.load_module()
__lowercase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = '\n'.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F"""{list_of_modules}\n"""
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 325 | 1 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor ):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = IMAGENET_DEFAULT_MEAN , image_std = IMAGENET_DEFAULT_STD , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size_dict = get_size_dict(size , default_to_square=False )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size['shortest_edge'] )
            output_size = get_resize_output_image_size(image , size=shortest_edge , default_to_square=False )
            size_dict = {'height': output_size[0], 'width': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
        return resize(
            image , size=(size_dict['height'], size_dict['width']) , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )

    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image , size , resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image , crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image , rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image , image_mean , image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
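# A minimal usage sketch for the processor above (wrapped in a function so
# nothing runs at import time). The output shape follows from the defaults:
# shortest-edge resize via the (256 / 224) rule, then a 224x224 center crop.
def _demo_levit_image_processor() -> None:
    processor = LevitImageProcessor()
    batch = processor(images=np.zeros((480, 640, 3) , dtype=np.uint8 ) , return_tensors="np" )
    print(batch["pixel_values"].shape )  # (1, 3, 224, 224)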
| 368 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class __magic_name__ ( unittest.TestCase ):
    def setUp( self ) -> None:
        '''simple docstring'''
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='utf-8' , check=True , )
        assert hasattr(self , 'env' )
    def create_estimator( self , instance_count ) -> HuggingFace:
        '''simple docstring'''
        job_name = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
        # distributed data settings
        distribution = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='py36' , )
    def save_results_as_csv( self , job_name ) -> None:
        '''simple docstring'''
        TrainingJobAnalytics(job_name ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(2,)] )
    def test_script( self , instance_count ) -> None:
        '''simple docstring'''
        # create estimator
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_9999 )
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , 'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __snake_case )
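# For reference (a sketch, not part of the parameterized matrix above): the two
# launch modes exercised here differ only in the `distribution` argument built
# in create_estimator:
#   run_glue.py -> {"smdistributed": {"dataparallel": {"enabled": True}}}
#   run_ddp.py  -> None (the script starts torch.distributed itself)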
| 308 | 0 |
class Node:
    """simple docstring"""

    def __init__(self , data , previous=None , next_node=None ):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self ):
        return F"{self.data}"

    def get_data(self ):
        return self.data

    def get_next(self ):
        return self.next

    def get_previous(self ):
        return self.previous


class LinkedListIterator:
    """simple docstring"""

    def __init__(self , head ):
        self.current = head

    def __iter__(self ):
        return self

    def __next__(self ):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    """simple docstring"""

    def __init__(self ):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self ):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )

    def __contains__(self , value ):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self ):
        return LinkedListIterator(self.head )

    def get_head_data(self ):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self ):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self , node ):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )

    def set_tail(self , node ):
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )

    def insert(self , value ):
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )

    def insert_before_node(self , node , node_to_insert ):
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self , node , node_to_insert ):
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self , position , value ):
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )

    def get_node(self , item ):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("""Node not found""" )

    def delete_value(self , value ):
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )

    @staticmethod
    def remove_node_pointers(node ):
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self ):
        return self.head is None
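def _demo_linked_list() -> None:
    # A minimal usage sketch for the structures above: build 1 <-> 2 <-> 3,
    # remove the middle node, and confirm the head/tail pointers survive.
    dll = LinkedList()
    for value in (1, 2, 3):
        dll.insert(value )
    dll.delete_value(2 )
    print(dll )  # "1 3"
    print(dll.get_head_data() , dll.get_tail_data() )  # 1 3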
def __magic_name__ ( ):
'''simple docstring'''
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 244 |
"""simple docstring"""
def bin_to_octal( bin_string : str ) -> str:
    if not all(char in "01" for char in bin_string ):
        raise ValueError("Non-binary value was passed to the function" )
    if not bin_string:
        raise ValueError("Empty string was passed to the function" )
    oct_string = ""
    while len(bin_string ) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
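    # Worked example: "1001010" left-pads to "001001010", splits into
    # ["001", "001", "010"], and each group maps to one octal digit.
    print(bin_to_octal("1001010"))  # 112  (0b1001010 == 74 == 0o112)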
| 144 | 0 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list ):
    def __lt__( self , other ) -> bool:
        return self[-1] < other[-1]

    def __eq__( self , other ) -> bool:
        return self[-1] == other[-1]


def patience_sort( collection: list ) -> list:
    """simple docstring"""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
if __name__ == "__main__":
UpperCamelCase_ : Union[str, Any] = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase_ : Optional[int] = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
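    # Illustration of the mechanism: [5, 1, 4, 2, 3] is dealt into the sorted
    # piles [5, 1], [4, 2], [3] (each element lands on the leftmost pile whose
    # top is >= it, found with bisect_left), then the reversed piles are
    # heap-merged back into one sorted list.
    print(patience_sort([5, 1, 4, 2, 3]))  # [1, 2, 3, 4, 5]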
| 354 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
UpperCamelCase_ : int = '''pytorch_model.bin'''
UpperCamelCase_ : str = '''pytorch_model.bin.index.json'''
UpperCamelCase_ : int = '''adapter_config.json'''
UpperCamelCase_ : str = '''adapter_model.bin'''
UpperCamelCase_ : str = '''adapter_model.safetensors'''
UpperCamelCase_ : List[Any] = '''tf_model.h5'''
UpperCamelCase_ : Union[str, Any] = '''tf_model.h5.index.json'''
UpperCamelCase_ : Tuple = '''model.ckpt'''
UpperCamelCase_ : Union[str, Any] = '''flax_model.msgpack'''
UpperCamelCase_ : Union[str, Any] = '''flax_model.msgpack.index.json'''
UpperCamelCase_ : Dict = '''model.safetensors'''
UpperCamelCase_ : List[Any] = '''model.safetensors.index.json'''
UpperCamelCase_ : Tuple = '''config.json'''
UpperCamelCase_ : List[str] = '''preprocessor_config.json'''
UpperCamelCase_ : List[Any] = FEATURE_EXTRACTOR_NAME
UpperCamelCase_ : Union[str, Any] = '''generation_config.json'''
UpperCamelCase_ : str = '''modelcard.json'''
UpperCamelCase_ : List[Any] = '''▁'''
UpperCamelCase_ : Tuple = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
UpperCamelCase_ : Any = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
UpperCamelCase_ : Tuple = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
UpperCamelCase_ : str = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version( min_version: str ) -> None:
    """simple docstring"""
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = F"""This example requires a minimum version of {min_version},"""
        error_message += F""" but the version found is {__version__}.\n"""
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers." )
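# Usage sketch: example scripts call this right after their imports with the
# release they were written against, e.g.
#   check_min_version("4.21.0.dev0")
# so running them on an older installed transformers fails fast with the
# ImportError built above instead of a confusing AttributeError later on.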
| 142 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = """dpr"""

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim: int = 0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
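# A minimal usage sketch for the config above (upstream it is exported as
# transformers.DPRConfig; a projection_dim > 0 adds a projection layer on top
# of the encoder's pooled output):
def _demo_dpr_config() -> None:
    config = DPRConfig(projection_dim=128 )
    print(config.hidden_size , config.projection_dim )  # 768 128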
| 40 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowercase = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig ):
    """simple docstring"""

    batch_size: int = 1_0_0_0_0
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder ):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self ):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files , (str, list, tuple)):
            files = data_files
            if isinstance(files , str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file , "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files}))
        return splits

    def _cast_table(self , pa_table: pa.Table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self , files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''')
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file , "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'''{file_idx}_{batch_idx}''', self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f'''Failed to read file \'{file}\' with error {type(e)}: {e}''')
                    raise
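# End-to-end sketch (hypothetical file path, not from this module): this
# builder is what runs when parquet files are loaded through the datasets API:
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})
# Each yield in _generate_tables emits one Arrow table of up to batch_size
# (default 10000) rows, optionally restricted to config.columns.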
| 40 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE_ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE_ =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ =IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ =IMAGE_TO_IMAGE_IMAGE_PARAMS
def __a ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
UpperCAmelCase__ : Dict = PNDMScheduler(skip_prk_steps=snake_case__ )
torch.manual_seed(0 )
UpperCAmelCase__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
UpperCAmelCase__ : Dict = CLIPTextModel(snake_case__ )
UpperCAmelCase__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase__ : List[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __a ( self : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]=0 ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
UpperCAmelCase__ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : Any = Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" )
if str(snake_case__ ).startswith("mps" ):
UpperCAmelCase__ : int = torch.manual_seed(snake_case__ )
else:
UpperCAmelCase__ : Dict = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCAmelCase__ : List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : List[Any] = self.get_dummy_components()
UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline(**snake_case__ )
UpperCAmelCase__ : Optional[int] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase__ : Optional[Any] = self.get_dummy_inputs(snake_case__ )
UpperCAmelCase__ : str = sd_pipe(**snake_case__ ).images
UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
UpperCAmelCase__ : Any = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : List[Any] = self.get_dummy_components()
UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline(**snake_case__ )
UpperCAmelCase__ : str = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase__ : int = self.get_dummy_inputs(snake_case__ )
UpperCAmelCase__ : List[Any] = "french fries"
UpperCAmelCase__ : Any = sd_pipe(**snake_case__ , negative_prompt=snake_case__ )
UpperCAmelCase__ : Union[str, Any] = output.images
UpperCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
UpperCAmelCase__ : Any = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : List[str] = self.get_dummy_components()
UpperCAmelCase__ : Tuple = StableDiffusionInstructPixaPixPipeline(**snake_case__ )
UpperCAmelCase__ : List[str] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase__ : List[Any] = self.get_dummy_inputs(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = [inputs["prompt"]] * 2
UpperCAmelCase__ : Optional[Any] = np.array(inputs["image"] ).astype(np.floataa ) / 255.0
UpperCAmelCase__ : Union[str, Any] = torch.from_numpy(snake_case__ ).unsqueeze(0 ).to(snake_case__ )
UpperCAmelCase__ : Optional[Any] = image / 2 + 0.5
UpperCAmelCase__ : int = image.permute(0 , 3 , 1 , 2 )
UpperCAmelCase__ : Tuple = image.repeat(2 , 1 , 1 , 1 )
UpperCAmelCase__ : Any = sd_pipe(**snake_case__ ).images
UpperCAmelCase__ : Optional[int] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
UpperCAmelCase__ : str = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : List[str] = self.get_dummy_components()
UpperCAmelCase__ : Optional[Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" )
UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline(**snake_case__ )
UpperCAmelCase__ : Union[str, Any] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase__ : Optional[int] = self.get_dummy_inputs(snake_case__ )
UpperCAmelCase__ : Tuple = sd_pipe(**snake_case__ ).images
UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase__ : int = [round(snake_case__ , 4 ) for x in image_slice.flatten().tolist()]
print(",".join([str(snake_case__ ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
UpperCAmelCase__ : int = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def __a ( self : Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.get_dummy_components()
UpperCAmelCase__ : Tuple = StableDiffusionInstructPixaPixPipeline(**snake_case__ )
UpperCAmelCase__ : List[str] = VaeImageProcessor(do_resize=snake_case__ , do_normalize=snake_case__ )
UpperCAmelCase__ : Dict = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase__ : List[str] = pipe(**self.get_dummy_inputs_by_type(snake_case__ , input_image_type="pt" ) )[0]
UpperCAmelCase__ : Union[str, Any] = components["vae"]
UpperCAmelCase__ : int = self.get_dummy_inputs_by_type(snake_case__ , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
UpperCAmelCase__ : Dict = vae.encode(inputs[image_param] ).latent_dist.mode()
UpperCAmelCase__ : str = pipe(**snake_case__ )[0]
UpperCAmelCase__ : Optional[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(snake_case__ , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Optional[Any] , snake_case__ : int=0 ):
'''simple docstring'''
UpperCAmelCase__ : str = torch.manual_seed(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
UpperCAmelCase__ : str = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Optional[Any] = self.get_inputs()
UpperCAmelCase__ : Tuple = pipe(**snake_case__ ).images
UpperCAmelCase__ : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase__ : int = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ )
UpperCAmelCase__ : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Any = self.get_inputs()
UpperCAmelCase__ : List[str] = pipe(**snake_case__ ).images
UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase__ : Optional[int] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ )
UpperCAmelCase__ : int = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = self.get_inputs()
UpperCAmelCase__ : List[Any] = pipe(**snake_case__ ).images
UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase__ : Any = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def __a ( self : Dict ):
'''simple docstring'''
        number_of_steps = 0

        def callback_fn(step: int , timestep: int , latents: torch.FloatTensor ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 6_4, 6_4)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 6_4, 6_4)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2

        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix" , safety_checker=None , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs , callback=callback_fn , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3
def __a ( self : Dict ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=snake_case__ , torch_dtype=torch.floataa )
UpperCAmelCase__ : Tuple = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase__ : Optional[Any] = self.get_inputs()
UpperCAmelCase__ : str = pipe(**snake_case__ )
UpperCAmelCase__ : str = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCAmelCase__ : Dict = inputs["image"].resize((5_0_4, 5_0_4) )
UpperCAmelCase__ : Dict = "timbrooks/instruct-pix2pix"
UpperCAmelCase__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
snake_case__ , safety_checker=snake_case__ , )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Optional[int] = pipe(**snake_case__ )
UpperCAmelCase__ : Union[str, Any] = output.images[0]
UpperCAmelCase__ : Union[str, Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
UpperCAmelCase__ : Optional[int] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
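# Background note (a sketch, not part of the tests): instruct-pix2pix
# conditions on both the edit prompt and the source image, hence the two
# guidance knobs exercised above. Roughly, each denoising step combines three
# noise predictions (per the upstream pipeline implementation):
#   noise = e_uncond + guidance_scale * (e_text - e_image)
#                    + image_guidance_scale * (e_image - e_uncond)
# which is why guidance_scale and image_guidance_scale are swept separately.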
| 355 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =IFPipeline
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __a ( self : Dict ):
'''simple docstring'''
return self._get_dummy_components()
def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ):
'''simple docstring'''
if str(snake_case__ ).startswith("mps" ):
UpperCAmelCase__ : str = torch.manual_seed(snake_case__ )
else:
UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCAmelCase__ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __a ( self : Tuple ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __a ( self : Tuple ):
'''simple docstring'''
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __a ( self : Dict ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __a ( self : int ):
'''simple docstring'''
self._test_save_load_local()
def __a ( self : Any ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __a ( self : Optional[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __a ( self : str ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Tuple ):
'''simple docstring'''
# if
UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : List[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components )
UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components )
UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : str = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Tuple = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : str = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Optional[Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : int = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def _start_torch_memory_measurement() -> None:
    '''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 298 | 0 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef

DEPRECATION_WARNING = (
    'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
    'library. You can have a look at this example script for pointers: '
    'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def simple_accuracy( preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(simple_accuracy , """sklearn""" )
    return (preds == labels).mean()
def acc_and_f1( preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(acc_and_f1 , """sklearn""" )
    acc = simple_accuracy(preds , labels )
    f1 = f1_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman( preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(pearson_and_spearman , """sklearn""" )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics( task_name , preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(glue_compute_metrics , """sklearn""" )
    assert len(preds ) == len(labels ), F'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}'''
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_f1(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_f1(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics( task_name , preds , labels ):
    warnings.warn(DEPRECATION_WARNING , FutureWarning )
    requires_backends(xnli_compute_metrics , """sklearn""" )
    if len(preds ) != len(labels ):
        raise ValueError(F'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}''' )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
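# A minimal usage sketch for the dispatcher above: MRPC reports both accuracy
# and F1 over integer label arrays.
def _demo_glue_compute_metrics() -> None:
    import numpy as np

    preds = np.array([1, 0, 1, 1] )
    labels = np.array([1, 0, 0, 1] )
    print(glue_compute_metrics("mrpc" , preds , labels ) )
    # {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}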
| 239 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    '''simple docstring'''
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if not scores:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main() -> None:
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print(F'''Optimal value : {minimax(0 , 0 , True , scores , height )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
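    # Worked micro-example: with scores [3, 5, 2, 9] and height 2, the two
    # minimizers return min(3, 5) = 3 and min(2, 9) = 2, so the maximizer at
    # the root picks max(3, 2) = 3.
    print(F'''Tiny tree : {minimax(0, 0, True, [3, 5, 2, 9], 2)}''')  # 3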
| 80 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a GPT-2 style bijection from every utf-8 byte value to a printable unicode character."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
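# Sanity notes (added for illustration): bytes_to_unicode() is a bijection over all 256
# byte values, and get_pairs enumerates the adjacent symbol pairs consumed by the BPE
# merge loop in `bpe` below, e.g.
# get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.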
class BlenderbotTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for Blenderbot (GPT-2 style, adapted from the RoBERTa tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 353 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
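    # Note (added for illustration): registering the _LazyModule in sys.modules below makes
    # this package lazy -- heavy submodules (modeling, tokenization) are imported only on
    # first attribute access, so `import transformers.models.funnel` itself stays cheap.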
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 292 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311 |
"""Flax mT5 model: thin wrappers around the Flax T5 classes with an mT5 config."""
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids, pad_token_id: int, decoder_start_token_id: int):
    """Shift input ids one token to the right and replace -100 label padding with `pad_token_id`."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
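# Worked example (added for illustration): with pad_token_id=0 and decoder_start_token_id=2,
# shift_tokens_right(jnp.array([[5, -100, -100]]), 0, 2) first shifts to [[2, 5, -100]] and
# then replaces the remaining -100 label padding, returning [[2, 5, 0]].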
class FlaxMT5Model(FlaxT5Model):
    r"""A Flax mT5 model; behaves like `FlaxT5Model` but reads an mT5 config."""

    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    r"""Encoder-only Flax mT5 model."""

    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    r"""Flax mT5 model with a language modeling head."""

    model_type = "mt5"
    config_class = MT5Config
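# Usage sketch (illustrative): FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
# loads mT5 weights into the T5 architecture inherited above, and shift_tokens_right prepares
# decoder inputs from labels during training.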
| 311 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__(self : List[Any] , a__ : List[str] , a__ : int=13 , a__ : Optional[int]=30 , a__ : int=2 , a__ : List[Any]=3 , a__ : str=True , a__ : int=True , a__ : str=32 , a__ : Optional[int]=5 , a__ : Optional[Any]=4 , a__ : Any=37 , a__ : str="gelu" , a__ : Any=0.1 , a__ : List[Any]=0.1 , a__ : int=10 , a__ : str=0.0_2 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = type_sequence_label_size
__snake_case = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def a (self : List[str] ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
return config, pixel_values
def a (self : Tuple , a__ : List[Any] , a__ : Optional[int] ):
"""simple docstring"""
__snake_case = FlaxViTModel(config=lowercase_ )
__snake_case = model(lowercase_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (self.image_size, self.image_size)
__snake_case = (self.patch_size, self.patch_size)
__snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def a (self : int , a__ : List[Any] , a__ : List[str] ):
"""simple docstring"""
__snake_case = self.type_sequence_label_size
__snake_case = FlaxViTForImageClassification(config=lowercase_ )
__snake_case = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case = 1
__snake_case = FlaxViTForImageClassification(lowercase_ )
__snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case = model(lowercase_ )
def a (self : Union[str, Any] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE__ ( a_ , unittest.TestCase ):
A_ : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = FlaxViTModelTester(self )
__snake_case = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def a (self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(lowercase_ )
__snake_case = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def a (self : str ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case = self._prepare_for_class(lowercase_ , lowercase_ )
__snake_case = model_class(lowercase_ )
@jax.jit
def model_jitted(a__ : List[Any] , **a__ : Optional[int] ):
return model(pixel_values=lowercase_ , **lowercase_ )
with self.subTest('''JIT Enabled''' ):
__snake_case = model_jitted(**lowercase_ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__snake_case = model_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def a (self : List[Any] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
__snake_case = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
__snake_case = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowercase_ )
| 358 |
class DisjointSet:
    """Disjoint-set (union-find) with union by rank, path compression and per-set size tracking."""

    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge the sets containing `src` and `dst`; return False if they are already joined."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of `disj_set`, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
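# Demo (added for illustration): three singleton sets of sizes 1, 2 and 3; merging the
# elements 0 and 2 yields a set of size 4, which becomes the new maximum.
if __name__ == "__main__":
    ds = DisjointSet([1, 2, 3])
    assert ds.merge(0, 2) is True
    assert ds.max_set == 4
    assert ds.merge(0, 2) is False  # already in the same set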
| 238 | 0 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 139 |
"""Mean IoU (Intersection-over-Union) metric for semantic segmentation."""
from typing import Dict, Optional
import numpy as np
import datasets
lowercase_ = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
lowercase_ = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
lowercase_ = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Calculate intersection and union histograms for one prediction / ground-truth pair."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Accumulate intersection and union histograms over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU, mean accuracy and overall accuracy over a set of segmentation maps."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
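# Worked example (added for illustration) with num_labels=2 and ignore_index=255:
# prediction [[0, 1]] against ground truth [[0, 0]] gives per-category IoU [0.5, 0.0]
# (class 0: intersection 1 / union 2; class 1: intersection 0 / union 1), so
# mean_iou = 0.25 and overall_accuracy = 0.5.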
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    """Mean IoU wrapped as a `datasets.Metric`."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 211 | 0 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether `module` is the result of `torch.compile` (a `_dynamo` OptimizedModule)."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Unwrap `model` from DDP/DataParallel/DeepSpeed/compile wrappers and return the bare model."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save `obj` to `f`, using `xm.save` on TPU and saving only from the local main process otherwise."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (keys are upper-cased), restoring the environment on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    """Return a readable name for `obj`: its qualified name, its name, or its string representation."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` in place and return `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port=None):
    """Return True if something is already listening on `port` (default 29500) on localhost."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 371 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
A : Optional[int] = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a=16 , __a=13 , __a=7 , __a=14 , __a=10 , __a=19 , __a=5 , __a=4 , __a=True , __a=16 , __a=2 , __a=4 , __a=4 , __a="gelu" , __a=0.1 , __a=0.1 , __a=[1, 2, 3, 4, 5] , __a=25 , __a=5 , ):
__lowerCAmelCase = d_model
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = prediction_length
__lowerCAmelCase = context_length
__lowerCAmelCase = cardinality
__lowerCAmelCase = num_time_features
__lowerCAmelCase = lags_sequence
__lowerCAmelCase = embedding_dimension
__lowerCAmelCase = is_training
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = context_length
__lowerCAmelCase = prediction_length + label_length
__lowerCAmelCase = label_length
__lowerCAmelCase = moving_average
__lowerCAmelCase = autocorrelation_factor
def snake_case ( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def snake_case ( self , __a ):
__lowerCAmelCase = config.context_length + max(config.lags_sequence )
__lowerCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
__lowerCAmelCase = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def snake_case ( self ):
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = self.prepare_autoformer_inputs_dict(__a )
return config, inputs_dict
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case ( self , __a , __a ):
__lowerCAmelCase = AutoformerModel(config=__a ).to(__a ).eval()
__lowerCAmelCase = model(**__a )
__lowerCAmelCase = outputs.encoder_last_hidden_state
__lowerCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_encoder()
encoder.save_pretrained(__a )
__lowerCAmelCase = AutoformerEncoder.from_pretrained(__a ).to(__a )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model.create_network_inputs(**__a )
__lowerCAmelCase , __lowerCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowerCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowerCAmelCase = encoder(inputs_embeds=__a )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
__lowerCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowerCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowerCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowerCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_decoder()
decoder.save_pretrained(__a )
__lowerCAmelCase = AutoformerDecoder.from_pretrained(__a ).to(__a )
__lowerCAmelCase = decoder(
trend=__a , inputs_embeds=__a , encoder_hidden_states=__a , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] =(AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__UpperCAmelCase : List[Any] =(AutoformerForPrediction,) if is_torch_available() else ()
__UpperCAmelCase : Tuple ={"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
__UpperCAmelCase : Tuple =False
__UpperCAmelCase : Any =False
__UpperCAmelCase : Dict =False
__UpperCAmelCase : Union[str, Any] =False
__UpperCAmelCase : Union[str, Any] =False
__UpperCAmelCase : Optional[Any] =False
def snake_case ( self ):
__lowerCAmelCase = AutoformerModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__a , has_text_modality=__a )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowerCAmelCase , __lowerCAmelCase = model_class.from_pretrained(__a , output_loading_info=__a )
self.assertEqual(info["missing_keys"] , [] )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__a )
@unittest.skip(reason="Model has no tokens embeddings" )
def snake_case ( self ):
pass
def snake_case ( self ):
__lowerCAmelCase = inspect.signature(getattr(__a , "forward" ) )
# The main input is the name of the argument after `self`
__lowerCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __a )
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__a )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(__a )] , __a )
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = getattr(self.model_tester , "seq_length" , __a )
__lowerCAmelCase = getattr(self.model_tester , "decoder_seq_length" , __a )
__lowerCAmelCase = getattr(self.model_tester , "encoder_seq_length" , __a )
__lowerCAmelCase = getattr(self.model_tester , "d_model" , __a )
__lowerCAmelCase = getattr(self.model_tester , "num_attention_heads" , __a )
__lowerCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) )
__lowerCAmelCase = outputs.encoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowerCAmelCase = len(__a )
__lowerCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__a , __a )
# decoder attentions
__lowerCAmelCase = outputs.decoder_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowerCAmelCase = outputs.cross_attentions
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 2 , len(__a ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def snake_case ( self ):
super().test_retain_grad_hidden_states_attentions()
def _lowerCamelCase ( _UpperCamelCase="train-batch.pt" ):
'''simple docstring'''
__lowerCAmelCase = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=_UpperCamelCase , repo_type="dataset" )
__lowerCAmelCase = torch.load(_UpperCamelCase , map_location=_UpperCamelCase )
return batch
@require_torch
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a )
__lowerCAmelCase = prepare_batch()
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
__lowerCAmelCase = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __a )
__lowerCAmelCase = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def snake_case ( self ):
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a )
__lowerCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
__lowerCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __a )
__lowerCAmelCase = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=__a )
self.assertTrue(torch.allclose(output[0, :3, :3] , __a , atol=__a ) )
def snake_case ( self ):
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(__a )
__lowerCAmelCase = prepare_batch("val-batch.pt" )
with torch.no_grad():
__lowerCAmelCase = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
__lowerCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __a )
__lowerCAmelCase = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=__a )
__lowerCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __a , rtol=1e-1 ) )
| 259 | 0 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """Tokenizer for GPT-NeoX Japanese: a subword vocabulary with emoji handling and byte fallback."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """Greedy longest-match subword tokenizer with text normalization, emoji handling and byte fallback."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
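# Usage sketch (illustrative): the tokenizer first normalizes whitespace, newlines and emoji
# into special markers, then greedily matches the longest vocabulary entry from each position;
# characters without a vocabulary entry fall back to <|byte..|> tokens.
# tok = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
# tok.tokenize("こんにちは")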
| 111 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 135 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : List[str] = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__UpperCamelCase : Union[str, Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ , A_ ):
for attribute in key.split('''.''' ):
lowerCAmelCase__ : List[Any] = getattr(A_ , A_ )
if weight_type is not None:
lowerCAmelCase__ : int = getattr(A_ , A_ ).shape
else:
lowerCAmelCase__ : Any = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowerCAmelCase__ : List[Any] = value
elif weight_type == "weight_g":
lowerCAmelCase__ : Optional[Any] = value
elif weight_type == "weight_v":
lowerCAmelCase__ : List[str] = value
elif weight_type == "bias":
lowerCAmelCase__ : str = value
else:
lowerCAmelCase__ : str = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Optional[Any] = fairseq_model.state_dict()
lowerCAmelCase__ : Tuple = hf_model.feature_extractor
    # if the encoder has a different dim than the decoder -> use proj_weight
lowerCAmelCase__ : Dict = None
for name, value in fairseq_dict.items():
lowerCAmelCase__ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
A_ , A_ , A_ , A_ , hf_model.config.feat_extract_norm == '''group''' , )
lowerCAmelCase__ : Optional[int] = True
elif name.split('''.''' )[0] == "proj":
lowerCAmelCase__ : Optional[int] = fairseq_model.proj
lowerCAmelCase__ : List[str] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCAmelCase__ : Union[str, Any] = True
if "*" in mapped_key:
lowerCAmelCase__ : str = name.split(A_ )[0].split('''.''' )[-2]
lowerCAmelCase__ : Union[str, Any] = mapped_key.replace('''*''' , A_ )
if "weight_g" in name:
lowerCAmelCase__ : Union[str, Any] = '''weight_g'''
elif "weight_v" in name:
lowerCAmelCase__ : Tuple = '''weight_v'''
elif "bias" in name:
lowerCAmelCase__ : Optional[Any] = '''bias'''
elif "weight" in name:
lowerCAmelCase__ : Optional[Any] = '''weight'''
else:
lowerCAmelCase__ : str = None
set_recursively(A_ , A_ , A_ , A_ , A_ )
continue
if not is_used:
unused_weights.append(A_ )
logger.warning(f'Unused weights: {unused_weights}' )
return proj_weight
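# "*" in a MAPPING value is a layer-index wildcard. For example, the fairseq key
# "encoder.layers.3.fc1.weight" yields layer_index "3" via
# name.split(key)[0].split(".")[-2], and the mapped key becomes
# "encoder.layers.3.feed_forward.intermediate_dense" with weight_type "weight".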
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ , A_ ):
lowerCAmelCase__ : Optional[int] = full_name.split('''conv_layers.''' )[-1]
lowerCAmelCase__ : str = name.split('''.''' )
lowerCAmelCase__ : Tuple = int(items[0] )
lowerCAmelCase__ : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowerCAmelCase__ : Any = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowerCAmelCase__ : Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowerCAmelCase__ : Optional[int] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowerCAmelCase__ : Any = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = emb.weight.shape
lowerCAmelCase__ : Optional[int] = nn.Linear(A_ , A_ , bias=A_ )
lowerCAmelCase__ : Optional[int] = emb.weight.data
return lin_layer
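# The helper above is a weight-tying sketch: it reuses the embedding matrix as a
# bias-free output projection. It does not appear to be called in this script --
# the conversion below copies model.decoder.embed_out into lm_head directly.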
def __SCREAMING_SNAKE_CASE ( A_ ):
with open(A_ , '''r''' , encoding='''utf-8''' ) as f:
lowerCAmelCase__ : Dict = f.readlines()
lowerCAmelCase__ : Optional[Any] = [line.split(''' ''' )[0] for line in lines]
lowerCAmelCase__ : Dict = len(A_ )
lowerCAmelCase__ : Optional[int] = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(A_ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
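# Ids 0-3 are reserved for <s>, <pad>, </s> and <unk>; the words read from the
# fairseq dict file are appended starting at id 4.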
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
lowerCAmelCase__ : int = WavaVecaConfig.from_pretrained(A_ )
lowerCAmelCase__ : Any = SpeechaTextaConfig.from_pretrained(
A_ , vocab_size=A_ , decoder_layers=A_ , do_stable_layer_norm=A_ )
lowerCAmelCase__ : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=A_ , return_attention_mask=A_ , )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
lowerCAmelCase__ : List[str] = model[0].eval()
# set weights for wav2vec2 encoder
lowerCAmelCase__ : Optional[Any] = WavaVecaModel(A_ )
lowerCAmelCase__ : int = recursively_load_weights_wavaveca(model.encoder , A_ )
lowerCAmelCase__ : Tuple = SpeechaTextaForCausalLM(A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=A_ )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
lowerCAmelCase__ : List[str] = nn.Parameter(model.decoder.embed_out.detach() )
    # the layer norm is initialized to the identity, so leaving it untouched is fine
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
lowerCAmelCase__ : int = SpeechEncoderDecoderModel(encoder=A_ , decoder=A_ )
lowerCAmelCase__ : List[Any] = False
# add projection layer
lowerCAmelCase__ : str = nn.Parameter(projection_layer.weight )
lowerCAmelCase__ : List[Any] = nn.Parameter(projection_layer.bias )
lowerCAmelCase__ : Optional[int] = create_vocab_dict(A_ )
with open(os.path.join(A_ , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(A_ , A_ )
lowerCAmelCase__ : Dict = SpeechaTextaTokenizer(os.path.join(A_ , '''vocab.json''' ) )
tokenizer.save_pretrained(A_ )
lowerCAmelCase__ : str = hf_wavavec.config.to_dict()
lowerCAmelCase__ : Any = tokenizer.pad_token_id
lowerCAmelCase__ : Optional[Any] = tokenizer.bos_token_id
lowerCAmelCase__ : Tuple = tokenizer.eos_token_id
lowerCAmelCase__ : Optional[Any] = '''speech_to_text_2'''
lowerCAmelCase__ : Dict = '''wav2vec2'''
lowerCAmelCase__ : str = SpeechEncoderDecoderConfig.from_dict(A_ )
hf_wavavec.save_pretrained(A_ )
feature_extractor.save_pretrained(A_ )
if __name__ == "__main__":
__UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0_2_2_4, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
__UpperCamelCase : List[str] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 74 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( A_ ):
    if not isinstance(A_ , int ):
lowerCAmelCase__ : int = f'Input value of [number={number}] must be an integer'
raise TypeError(A_ )
if number < 0:
return False
lowerCAmelCase__ : List[Any] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
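# A quick sanity check (hypothetical usage of the obfuscated name above):
#     >>> [n for n in range(100) if __SCREAMING_SNAKE_CASE(n)]
#     [0, 1, 5, 6, 25, 76]
# e.g. 76**2 = 5776 ends with 76, so 76 is automorphic.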
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( lowercase : List[str] , lowercase : str , lowercase : int , lowercase : Union[str, Any] , lowercase : Tuple , lowercase : str ) -> Dict:
"""simple docstring"""
if index == r:
for j in range(__snake_case ):
print(data[j] , end=" " )
print(" " )
return
    # When there are no more elements left to put in data[]
if i >= n:
return
# current is included, put next at next location
snake_case : Tuple = arr[i]
combination_util(__snake_case , __snake_case , __snake_case , index + 1 , __snake_case , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __lowerCAmelCase ( lowercase : List[str] , lowercase : List[str] , lowercase : Dict ) -> str:
"""simple docstring"""
snake_case : Tuple = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(__snake_case , __snake_case , __snake_case , 0 , __snake_case , 0 )
if __name__ == "__main__":
# Driver code to check the function above
__snake_case = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
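    # Expected output: all C(5, 3) = 10 combinations of size 3, one per line,
    # starting with "10 20 30" and ending with "30 40 50".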
# This code is contributed by Ambuj sahu
| 203 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def lowercase__ ( __snake_case : List[Any]=2 , __snake_case : Union[str, Any]=3 , __snake_case : Any=16 , __snake_case : int = 10 , __snake_case : int = 2 ):
'''simple docstring'''
def get_dataset(__snake_case : Optional[Any] ):
UpperCAmelCase_ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCAmelCase_ : Any = get_dataset(__snake_case )
UpperCAmelCase_ : str = get_dataset(__snake_case )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
return (train_dataloader, valid_dataloader)
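# The synthetic targets follow y = a*x + b + 0.1*noise, so the DummyModel below
# (forward: x * self.a + self.b) can fit them within a few epochs.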
def lowercase__ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple=None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = []
for epoch in range(__snake_case ):
# Train quickly
model.train()
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = batch
UpperCAmelCase_ : List[Any] = model(__snake_case )
UpperCAmelCase_ : int = torch.nn.functional.mse_loss(__snake_case , __snake_case )
accelerator.backward(__snake_case )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase (nn.Module ):
'''simple docstring'''
def __init__( self ) -> Optional[Any]:
super().__init__()
UpperCAmelCase_ : List[Any] = nn.Parameter(torch.randn(1 ) )
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn(1 ) )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]:
return x * self.a + self.b
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
            # Save 11 states; with total_limit=2 only the two most recent survive:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase = '/tmp/accelerate/state_checkpointing'
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert param_device.type == accelerator.device.type
__UpperCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 29 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( lowercase_ , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
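    # ONNX Runtime accepts (provider_name, provider_options) tuples; the options
    # above cap the CUDA memory arena at 15 GB and grow it only as requested.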
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Tuple = ort.SessionOptions()
lowercase_ : str = False
return options
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
lowercase_ : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
lowercase_ : str = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' ,revision='onnx' ,safety_checker=__UpperCamelCase ,feature_extractor=__UpperCamelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : Tuple = 'A red cat sitting on a park bench'
lowercase_ : int = np.random.RandomState(0 )
lowercase_ : Any = pipe(
prompt=__UpperCamelCase ,image=__UpperCamelCase ,mask_image=__UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=__UpperCamelCase ,output_type='np' ,)
lowercase_ : List[Any] = output.images
lowercase_ : int = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase_ : Optional[Any] = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
lowercase_ : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
lowercase_ : Dict = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' ,subfolder='scheduler' ,revision='onnx' )
lowercase_ : Any = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' ,revision='onnx' ,scheduler=__UpperCamelCase ,safety_checker=__UpperCamelCase ,feature_extractor=__UpperCamelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=__UpperCamelCase )
lowercase_ : Optional[Any] = 'A red cat sitting on a park bench'
lowercase_ : Optional[int] = np.random.RandomState(0 )
lowercase_ : Any = pipe(
prompt=__UpperCamelCase ,image=__UpperCamelCase ,mask_image=__UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=__UpperCamelCase ,output_type='np' ,)
lowercase_ : Optional[Any] = output.images
lowercase_ : Optional[Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase_ : List[Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 321 | """simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]]
lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase )
self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__UpperCamelCase ):
DisjunctiveConstraint(__UpperCamelCase ) # fails here
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 )
lowercase_ : str = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 )
lowercase_ : Any = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 )
lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
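        # dc.update(token_id) returns (stepped, completed, reset): stepped means the
        # token advanced some branch, completed means a full branch matched, and
        # reset means the token broke the partial match.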
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase )
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 321 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ :Tuple = logging.get_logger(__name__)
lowercase__ :Dict = {"tokenizer_file": "tokenizer.json"}
lowercase__ :Dict = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : Optional[int] =VOCAB_FILES_NAMES
lowercase_ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Any =['''input_ids''', '''attention_mask''']
lowercase_ : List[str] =None
def __init__( self ,A__=None ,A__=None ,A__=None ,A__="<unk>" ,A__="<s>" ,A__="</s>" ,A__="<pad>" ,A__=False ,A__=False ,**A__ ,):
super().__init__(
A__ ,A__ ,tokenizer_file=A__ ,unk_token=A__ ,bos_token=A__ ,eos_token=A__ ,pad_token=A__ ,add_prefix_space=A__ ,clean_up_tokenization_spaces=A__ ,**A__ ,)
lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' ,A__) != add_prefix_space:
lowercase = getattr(A__ ,pre_tok_state.pop('''type'''))
lowercase = add_prefix_space
lowercase = pre_tok_class(**A__)
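            # Rebuild the serialized pre-tokenizer so its add_prefix_space flag
            # matches the value passed to __init__ (a saved tokenizer.json may
            # disagree with the requested setting).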
lowercase = add_prefix_space
def A__ ( self ,*A__ ,**A__):
lowercase = kwargs.get('''is_split_into_words''' ,A__)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
''' pretokenized inputs.''')
return super()._batch_encode_plus(*A__ ,**A__)
def A__ ( self ,*A__ ,**A__):
lowercase = kwargs.get('''is_split_into_words''' ,A__)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
''' pretokenized inputs.''')
return super()._encode_plus(*A__ ,**A__)
def A__ ( self ,A__ ,A__ = None):
lowercase = self._tokenizer.model.save(A__ ,name=A__)
return tuple(A__)
def A__ ( self ,A__):
lowercase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(A__ ,add_special_tokens=A__) + [self.eos_token_id])
if len(A__) > self.model_max_length:
lowercase = input_ids[-self.model_max_length :]
return input_ids
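        # Inputs are truncated from the left so the most recent conversation
        # turns are kept once the history exceeds model_max_length.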
| 101 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : List[Any] =StableDiffusionInstructPixaPixPipeline
lowercase : List[Any] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowercase : Optional[Any] =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase : Union[str, Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase : List[Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
lowerCamelCase_ =PNDMScheduler(skip_prk_steps=lowerCAmelCase )
torch.manual_seed(0 )
lowerCamelCase_ =AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
lowerCamelCase_ =CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
lowerCamelCase_ =CLIPTextModel(lowerCAmelCase )
lowerCamelCase_ =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase_ =Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ =np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ ='''french fries'''
lowerCamelCase_ =sd_pipe(**lowerCAmelCase, negative_prompt=lowerCAmelCase )
lowerCamelCase_ =output.images
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ =np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ =[inputs['''prompt''']] * 2
lowerCamelCase_ =np.array(inputs['''image'''] ).astype(np.floataa ) / 2_5_5.0
lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ).unsqueeze(0 ).to(lowerCAmelCase )
lowerCamelCase_ =image / 2 + 0.5
lowerCamelCase_ =image.permute(0, 3, 1, 2 )
lowerCamelCase_ =image.repeat(2, 1, 1, 1 )
lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowerCamelCase_ =np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='''scaled_linear''' )
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1]
lowerCamelCase_ =[round(lowerCAmelCase, 4 ) for x in image_slice.flatten().tolist()]
        print(''','''.join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ =np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =VaeImageProcessor(do_resize=lowerCAmelCase, do_normalize=lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='''pt''' ) )[0]
lowerCamelCase_ =components['''vae''']
lowerCamelCase_ =self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowerCamelCase_ =vae.encode(inputs[image_param] ).latent_dist.mode()
lowerCamelCase_ =pipe(**lowerCAmelCase )[0]
lowerCamelCase_ =np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase, 1e-4, '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
lowerCamelCase_ ={
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =self.get_inputs()
lowerCamelCase_ =pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase )
lowerCamelCase_ =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =self.get_inputs()
lowerCamelCase_ =pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase )
lowerCamelCase_ =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =self.get_inputs()
lowerCamelCase_ =pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =0
def callback_fn(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) -> None:
lowerCamelCase_ =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase_ =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowerCamelCase_ =latents[0, -3:, -3:, -1]
lowerCamelCase_ =np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCamelCase_ =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowerCamelCase_ =latents[0, -3:, -3:, -1]
lowerCamelCase_ =np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCamelCase_ =False
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =self.get_inputs()
pipe(**lowerCAmelCase, callback=lowerCAmelCase, callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase__ ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ =self.get_inputs()
lowerCamelCase_ =pipe(**lowerCAmelCase )
lowerCamelCase_ =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ =inputs['''image'''].resize((504, 504) )
lowerCamelCase_ ='''timbrooks/instruct-pix2pix'''
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase, safety_checker=lowerCAmelCase, )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =pipe(**lowerCAmelCase )
lowerCamelCase_ =output.images[0]
lowerCamelCase_ =image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowerCamelCase_ =np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 75 | 0 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
a : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ ):
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
self.check_model_type(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Tuple:
a : List[str] = {}, {}
if padding is not None:
a : Union[str, Any] = padding
if truncation is not None:
a : Optional[int] = truncation
if top_k is not None:
a : int = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ ) -> Union[str, Any]:
if isinstance(lowerCAmelCase__ , (Image.Image, str) ) and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Optional[int] = {"image": image, "question": question}
else:
a : int = image
a : List[Any] = super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
return results
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Tuple:
a : str = load_image(inputs["image"] )
a : List[str] = self.tokenizer(
inputs["question"] , return_tensors=self.framework , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ )
a : Any = self.image_processor(images=lowerCAmelCase__ , return_tensors=self.framework )
model_inputs.update(lowerCAmelCase__ )
return model_inputs
def __a ( self , lowerCAmelCase__ ) -> Any:
a : int = self.model(**lowerCAmelCase__ )
return model_outputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=5 ) -> str:
if top_k > self.model.config.num_labels:
a : List[Any] = self.model.config.num_labels
if self.framework == "pt":
a : List[Any] = model_outputs.logits.sigmoid()[0]
a : int = probs.topk(lowerCAmelCase__ )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
a : Union[str, Any] = scores.tolist()
a : Any = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase__ , lowerCAmelCase__ )]
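        # Scores come from a sigmoid over the logits (multi-label, as in
        # ViLT-style VQA heads), so the top-k scores are independent and need
        # not sum to 1 across answers.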
| 368 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> List[Any]:
a : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a : List[Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(lowerCAmelCase__ )
a : Union[str, Any] = -1
a : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
a : Tuple = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
a : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
a : List[str] = TextStreamer(lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
a : int = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
a : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(lowerCAmelCase__ )
a : List[str] = -1
a : Optional[int] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
a : Union[str, Any] = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
a : Dict = tokenizer.decode(greedy_ids[0] )
a : str = TextIteratorStreamer(lowerCAmelCase__ )
a : str = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
a : Tuple = Thread(target=model.generate , kwargs=lowerCAmelCase__ )
thread.start()
a : Dict = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
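        # TextIteratorStreamer exposes generation as an iterator: generate() runs
        # in a background Thread while the consumer loop above collects the chunks.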
def __a ( self ) -> Tuple:
a : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(lowerCAmelCase__ )
a : Optional[int] = -1
a : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
a : List[str] = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
a : Optional[Any] = greedy_ids[:, input_ids.shape[1] :]
a : str = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
a : Tuple = TextStreamer(lowerCAmelCase__ , skip_prompt=lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
a : Optional[int] = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Tuple:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
a : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
a : Any = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(lowerCAmelCase__ )
a : Optional[int] = -1
a : Union[str, Any] = torch.ones((1, 5) , device=lowerCAmelCase__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
a : Any = TextStreamer(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=1 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
a : Optional[int] = cs.out[:-1] # Remove the final "\n"
a : List[Any] = tokenizer(lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __a ( self ) -> Dict:
a : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a : List[Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(lowerCAmelCase__ )
a : str = -1
a : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
a : List[Any] = TextIteratorStreamer(lowerCAmelCase__ , timeout=0.001 )
a : List[Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
a : int = Thread(target=model.generate , kwargs=lowerCAmelCase__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCAmelCase__ ):
a : int = ""
for new_text in streamer:
streamer_text += new_text
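

# Illustrative sketch (not part of the original test suite): how `TextIteratorStreamer`
# is typically consumed in application code. It relies on the imports at the top of this
# module; the model name and generation settings below are assumptions for the example.
def _example_stream_generation():
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer)
    # generate() runs in a background thread while the main thread consumes the stream
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
    thread.start()
    for new_text in streamer:  # yields decoded text chunks as generation progresses
        print(new_text, end="")
    thread.join()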
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    '''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_mobilevit'''] = ['''MobileViTFeatureExtractor''']
    _import_structure['''image_processing_mobilevit'''] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilevit'''] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mobilevit'''] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
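
# Example (illustrative): distilling a 12-layer teacher into a 3-layer student copies
# teacher layers LAYERS_TO_COPY[12][3] == [0, 6, 11] -- the first, middle, and last
# teacher layers are kept rather than strictly alternating ones.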
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Used for the --supervise_forward kwarg."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student by copying alternating layers from a teacher, save it to save_path."""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
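    # Example invocation (illustrative; the file name, model, and layer counts are assumptions):
    #   python make_student.py facebook/bart-large-cnn student_dir --e 12 --d 3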
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
    'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filename(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
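

# Minimal usage sketch (illustrative, not one of the tests above): a FileLock
# serializes access to a shared resource across processes via a lock file on disk.
# The path below is an assumption chosen for the example.
def _example_filelock_usage(path="resource.txt.lock"):
    with FileLock(path):
        pass  # the critical section protected by the lock goes here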
| 106 | 0 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
            name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
        )
    else:
        cluster = rh.cluster(
            name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
def solution(limit: int = 1_00_00_00) -> int:
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so sieve its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
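

# Worked example (illustrative): for limit = 8 the sieve above computes
# phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so solution(8) == 21 -- the number of reduced
# proper fractions n/d with d <= 8.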
if __name__ == "__main__":
print(solution())
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path):
    """Checks whether each of the expected custom files ships with the build."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('''transformers''')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
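

# Usage sketch (illustrative, not from the original file): composing a full
# InstructBlip config from its three sub-configs.
#
#   vision_config = InstructBlipVisionConfig()
#   qformer_config = InstructBlipQFormerConfig()
#   text_config = CONFIG_MAPPING["opt"]()
#   config = InstructBlipConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)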
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel PyTorch model instance to be converted
        ckpt_dir: TensorFlow model directory
        model_name: model name
    """

    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
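# Example invocation (illustrative; the script name and paths are assumptions):
#   python convert_bert_pytorch_checkpoint_to_tf.py --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin --tf_cache_dir ./tf_ckpt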
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity between two strings, in [0, 1].
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
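
# The returned score combines the Jaro similarity
#     jaro = (m / len(str1) + m / len(str2) + (m - t) / m) / 3
# (m = matching characters, t = transpositions) with a common-prefix bonus:
#     jaro_winkler = jaro + 0.1 * prefix_len * (1 - jaro), prefix_len capped at 4.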
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
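    # Example launch (illustrative; the config file, script name, and bound are assumptions):
    #   accelerate launch --config_file deepspeed_config.yaml this_script.py \
    #       --model_name_or_path bert-base-cased --performance_lower_bound 0.8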
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """Ensure FeaturesManager.determine_framework resolves the expected framework."""

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
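if __name__ == "__main__":
    # A minimal illustrative sketch, not part of the original test file: with
    # PyTorch installed, `determine_framework` resolves a hub checkpoint to "pt"
    # (or "tf" when only TensorFlow is available).
    print(FeaturesManager.determine_framework(SMALL_MODEL_IDENTIFIER))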
| 200 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a Flax checkpoint from disk into a PyTorch model."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load the weights of a flattened Flax state dict into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # Conv layer: Flax kernels are HWIO, PyTorch weights are OIHW.
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # Dense layer: transpose the kernel.
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
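# A hypothetical usage sketch (the model class and paths below are placeholders,
# not taken from this file): deserialize a Flax `.msgpack` checkpoint and copy
# its tensors into a freshly built PyTorch model.
#
#     from diffusers import UNet2DConditionModel
#
#     config = UNet2DConditionModel.load_config("path/to/model_directory")
#     pt_model = UNet2DConditionModel.from_config(config)
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "path/to/diffusion_flax_model.msgpack")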
| 200 | 1 |
"""A doubly linked list with head/tail pointers and an iterator."""


class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node):
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value):
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert):
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert):
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position, value):
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node):
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    """
    >>> new_list = LinkedList()
    >>> new_list.is_empty()
    True
    >>> new_list.insert(10)
    >>> new_list.insert(20)
    >>> print(new_list)
    10 20
    >>> new_list.get_head_data()
    10
    >>> new_list.get_tail_data()
    20
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
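# Illustrative walk-through of deletion and membership (not in the original file):
#
#     linked_list = LinkedList()
#     for value in (1, 2, 3):
#         linked_list.insert(value)
#     print(linked_list)                   # 1 2 3
#     linked_list.delete_value(2)          # unlinks the middle node
#     print(2 in linked_list)              # False
#     print(linked_list.get_tail_data())   # 3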
| 363 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 294 | 0 |
def is_arithmetic_series(series: list) -> bool:
    """Return True if the input list is an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the input list."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
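# Worked examples (hand-checked, not from the original file):
#     is_arithmetic_series([2, 4, 6]) -> True   (common difference is 2)
#     is_arithmetic_series([2, 4, 7]) -> False  (4 - 2 != 7 - 4)
#     arithmetic_mean([2, 4, 6])      -> 4.0    ((2 + 4 + 6) / 3)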
| 193 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """Zero-shot audio classification pipeline: scores candidate text labels against an audio input."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
return result
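# A hypothetical usage sketch (the checkpoint name is the CLAP model commonly
# paired with this pipeline; the audio path is a placeholder):
#
#     from transformers import pipeline
#
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("path/to/audio.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#     # -> e.g. [{"score": ..., "label": "Sound of a dog"}, ...], sorted by score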
| 302 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" BLOOM tokenizer backed by HuggingFace's tokenizers library, based on byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation, truncating to the model's maximum length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
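# A brief usage sketch (assumes access to the Hugging Face Hub; the checkpoint
# name comes from the map above):
#
#     tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     ids = tokenizer("Hello, BLOOM!")["input_ids"]
#     tokenizer.decode(ids)  # round-trips to the original text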
| 353 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Approximate the surface distance in metres between two points on the WGS-84 ellipsoid."""
    # Equation parameters: flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
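# Illustrative call (coordinates are approximately San Francisco and Yosemite;
# the result is the ellipsoidal surface distance in metres):
#     lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)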
| 166 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Data2Vec vision model."""

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
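if __name__ == "__main__":
    # A minimal sketch of the two classes together (not in the original module);
    # relies only on the defaults defined above.
    config = Data2VecVisionConfig()
    onnx_config = Data2VecVisionOnnxConfig(config)
    print(config.model_type, config.hidden_size, config.image_size)  # data2vec-vision 768 224
    print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}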
| 114 |
def kth_permutation(k, n):
    """
    Finds the k'th lexicographic permutation (in increasing order) of
    0, 1, 2, ..., n-1 in O(n^2) time.
    """
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
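# Worked example: for n=4 the factorials list is [1, 2, 6]; with k=10, divmod
# successively selects indices 1, 2, 0 from the remaining elements, so
#     kth_permutation(10, 4) == [1, 3, 0, 2]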
| 114 | 1 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    # Normalize each row, then take pairwise dot products (cosine similarity).
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 369 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for NER. See BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
| 212 | 0 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    # A block continues while lines are indented, empty, or close a signature.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(r'<FILL\s+[^>]*>')


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrites the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 25 |
'''simple docstring'''
def sum_of_digits(n):
    """Find the sum of digits of a number."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n):
    """Find the sum of digits of a number using recursion."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n):
    """Find the sum of digits of a number."""
    return sum(int(c) for c in str(abs(n)))


def benchmark():
    """Benchmark multiple functions, with three different length int values."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
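# Hand-checked examples (not from the original file):
#     sum_of_digits(12345)         == 15   (1 + 2 + 3 + 4 + 5)
#     sum_of_digits(-12345)        == 15   (sign is ignored via abs)
#     sum_of_digits_compact(12345) == 15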
| 37 | 0 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """
    Power Iteration: find the largest eigenvalue (in modulus) of `input_matrix`
    and a corresponding eigenvector, starting from `vector`.
    """
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
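# Quick illustrative check (not from the original file): for the symmetric
# matrix [[2, 1], [1, 2]] the dominant eigenvalue is 3 with eigenvector
# [1, 1] / sqrt(2), so
#     power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
# converges to approximately (3.0, array([0.70710678, 0.70710678])).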
| 158 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_UpperCAmelCase : Tuple = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_UpperCAmelCase : int = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_UpperCAmelCase : Union[str, Any] = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="dummy_doc" ):
lowercase :str = {doc: key_lines}
lowercase :Union[str, Any] = {doc: sys_lines}
lowercase :Tuple = {}
lowercase :Optional[Any] = 0
lowercase :str = 0
lowercase :Optional[Any] = 0
lowercase :str = 0
lowercase :Union[str, Any] = 0
lowercase :int = 0
lowercase , lowercase :str = reader.get_doc_mentions(lowerCamelCase, key_doc_lines[doc], lowerCamelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
lowercase :Any = reader.set_annotated_parse_trees(lowerCamelCase, key_doc_lines[doc], lowerCamelCase, lowerCamelCase )
lowercase , lowercase :int = reader.get_doc_mentions(lowerCamelCase, sys_doc_lines[doc], lowerCamelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowercase :str = reader.set_annotated_parse_trees(lowerCamelCase, key_doc_lines[doc], lowerCamelCase, lowerCamelCase )
if remove_nested:
lowercase , lowercase :List[str] = reader.remove_nested_coref_mentions(lowerCamelCase, lowerCamelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowercase , lowercase :Any = reader.remove_nested_coref_mentions(lowerCamelCase, lowerCamelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowercase :Optional[Any] = reader.get_mention_assignments(lowerCamelCase, lowerCamelCase )
lowercase :str = reader.get_mention_assignments(lowerCamelCase, lowerCamelCase )
lowercase :Optional[int] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"Number of removed nested coreferring mentions in the key "
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"Number of resulting singleton clusters in the key "
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"files, respectively" )
return doc_coref_infos
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowercase :Union[str, Any] = get_coref_infos(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase :List[str] = {}
lowercase :Dict = 0
lowercase :Tuple = 0
for name, metric in metrics:
lowercase , lowercase , lowercase :int = evaluator.evaluate_documents(lowerCamelCase, lowerCamelCase, beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(10 ), F"Recall: {recall * 100:.2f}", F" Precision: {precision * 100:.2f}", F" F1: {fa * 100:.2f}", )
if conll_subparts_num == 3:
lowercase :Any = (conll / 3) * 100
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"conll_score": conll} )
return output_scores
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :str = False
for line in key_lines:
if not line.startswith("#" ):
if len(line.split() ) > 6:
lowercase :Union[str, Any] = line.split()[5]
if not parse_col == "-":
lowercase :Optional[int] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __lowerCAmelCase ( datasets.Metric):
def SCREAMING_SNAKE_CASE ( self: Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: Dict , _lowerCAmelCase: Tuple , _lowerCAmelCase: Tuple=True , _lowerCAmelCase: Dict=False , _lowerCAmelCase: Optional[int]=False , _lowerCAmelCase: Dict=False ):
lowercase :Any = [
("mentions", evaluator.mentions),
("muc", evaluator.muc),
("bcub", evaluator.b_cubed),
("ceafe", evaluator.ceafe),
("lea", evaluator.lea),
]
if min_span:
lowercase :List[str] = util.check_gold_parse_annotation(_lowerCAmelCase )
if not has_gold_parse:
raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowercase :List[str] = evaluate(
key_lines=_lowerCAmelCase , sys_lines=_lowerCAmelCase , metrics=_lowerCAmelCase , NP_only=_lowerCAmelCase , remove_nested=_lowerCAmelCase , keep_singletons=_lowerCAmelCase , min_span=_lowerCAmelCase , )
return score
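# Hedged usage sketch (guarded so it is not executed on import). It assumes
# the `coval` package and `datasets` are installed, and reuses the CoNLL rows
# from the doctest in _KWARGS_DESCRIPTION; identical key/system files score
# a perfect 100.0.
if __name__ == "__main__":
    words = [
        "bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -",
        "bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)",
        "bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)",
        "bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -",
        "bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -",
        "bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -",
    ]
    coval_metric = datasets.load_metric("coval")
    results = coval_metric.compute(predictions=[words], references=[words])
    print(results["conll_score"])  # 100.0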
| 158 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : Tuple = 3
lowerCAmelCase__ : List[Any] = (32, 32)
lowerCAmelCase__ : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : int = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
return CLIPTextModel(__a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : List[str] = self.dummy_cond_unet_upscale
lowerCAmelCase__ : Union[str, Any] = DDPMScheduler()
lowerCAmelCase__ : List[str] = DDIMScheduler(prediction_type='v_prediction' )
lowerCAmelCase__ : List[str] = self.dummy_vae
lowerCAmelCase__ : Dict = self.dummy_text_encoder
lowerCAmelCase__ : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase__ : int = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ : Optional[Any] = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ : Optional[int] = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
lowerCAmelCase__ : Union[str, Any] = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCAmelCase__ : Dict = """A painting of a squirrel eating a burger"""
lowerCAmelCase__ : Optional[Any] = torch.Generator(device=__a ).manual_seed(0 )
lowerCAmelCase__ : List[Any] = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase__ : Optional[int] = output.images
lowerCAmelCase__ : Union[str, Any] = torch.Generator(device=__a ).manual_seed(0 )
lowerCAmelCase__ : str = sd_pipe(
[prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=__a , )[0]
lowerCAmelCase__ : int = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
lowerCAmelCase__ : List[str] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowerCAmelCase__ : str = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : List[Any] = self.dummy_cond_unet_upscale
lowerCAmelCase__ : Tuple = DDPMScheduler()
lowerCAmelCase__ : int = DDIMScheduler(prediction_type='v_prediction' )
lowerCAmelCase__ : Optional[int] = self.dummy_vae
lowerCAmelCase__ : str = self.dummy_text_encoder
lowerCAmelCase__ : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase__ : Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ : Dict = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ : Union[str, Any] = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
lowerCAmelCase__ : Optional[Any] = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCAmelCase__ : List[str] = """A painting of a squirrel eating a burger"""
lowerCAmelCase__ : List[Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase__ : str = output.images
assert image.shape[0] == 2
lowerCAmelCase__ : Union[str, Any] = torch.Generator(device=__a ).manual_seed(0 )
lowerCAmelCase__ : Optional[int] = sd_pipe(
[prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase__ : Optional[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.dummy_cond_unet_upscale
lowerCAmelCase__ : Any = DDPMScheduler()
lowerCAmelCase__ : Dict = DDIMScheduler(prediction_type='v_prediction' )
lowerCAmelCase__ : List[Any] = self.dummy_vae
lowerCAmelCase__ : int = self.dummy_text_encoder
lowerCAmelCase__ : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase__ : List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ : str = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
lowerCAmelCase__ : Tuple = unet.half()
lowerCAmelCase__ : List[str] = text_encoder.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ : Union[str, Any] = StableDiffusionUpscalePipeline(
unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
lowerCAmelCase__ : Union[str, Any] = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
lowerCAmelCase__ : Dict = """A painting of a squirrel eating a burger"""
lowerCAmelCase__ : int = torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = sd_pipe(
[prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type='np' , ).images
lowerCAmelCase__ : int = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowerCAmelCase__ : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
lowerCAmelCase__ : Any = """stabilityai/stable-diffusion-x4-upscaler"""
lowerCAmelCase__ : Dict = StableDiffusionUpscalePipeline.from_pretrained(__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
lowerCAmelCase__ : List[str] = """a cat sitting on a park bench"""
lowerCAmelCase__ : Optional[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : Tuple = pipe(
prompt=__a , image=__a , generator=__a , output_type='np' , )
lowerCAmelCase__ : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowerCAmelCase__ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
lowerCAmelCase__ : Optional[int] = """stabilityai/stable-diffusion-x4-upscaler"""
lowerCAmelCase__ : int = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
lowerCAmelCase__ : List[str] = """a cat sitting on a park bench"""
lowerCAmelCase__ : List[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = pipe(
prompt=__a , image=__a , generator=__a , output_type='np' , )
lowerCAmelCase__ : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _lowerCamelCase ( self : str ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase__ : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowerCAmelCase__ : List[str] = """stabilityai/stable-diffusion-x4-upscaler"""
lowerCAmelCase__ : Any = StableDiffusionUpscalePipeline.from_pretrained(
__a , torch_dtype=torch.floataa , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase__ : Optional[int] = """a cat sitting on a park bench"""
lowerCAmelCase__ : Dict = torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = pipe(
prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type='np' , )
lowerCAmelCase__ : str = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9 | 212 |
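# Hedged usage sketch mirroring the slow tests above. It assumes a CUDA device
# and network access to the public x4-upscaler checkpoint; the input image
# path is illustrative.
if __name__ == "__main__":
    upscaler = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    ).to("cuda")
    low_res = Image.open("low_res_cat.png").convert("RGB").resize((128, 128))
    result = upscaler(prompt="a cat sitting on a park bench", image=low_res).images[0]
    result.save("upscaled_cat.png")  # output is 4x the input: 512 x 512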
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # a node is at distance 0 from itself

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # classic O(n^3) relaxation: progressively allow nodes 0..k as
        # intermediates on the path from i to j
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))  # 11: 1 -> 3 -> 4
    print(graph.show_min(0, 3))  # 16: 0 -> 2 -> 3
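# Hedged extension sketch: math.inf survives in dp for unreachable pairs, so
# callers can detect disconnection directly.
if __name__ == "__main__":
    sparse = Graph(3)
    sparse.add_edge(0, 1, 2)
    sparse.floyd_warshall()
    assert sparse.show_min(0, 1) == 2
    assert sparse.show_min(1, 2) == math.inf  # node 2 is unreachable from node 1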
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # push values from largest to smallest so the list ends up ascending
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
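# Hedged demo: because the constructor pushes values from largest to smallest,
# iteration (and therefore __str__) yields ascending order.
if __name__ == "__main__":
    demo = SortedLinkedList([5, 1, 3])
    assert str(demo) == "1 -> 3 -> 5"
    assert len(demo) == 3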
| 94 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the token length of each train/val example so batches can be length-grouped later."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
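# Hedged CLI sketch: `fire` maps the function signature onto command-line
# flags. The data directory layout (train.source / train.target / val.*) is an
# assumption based on the Seq2SeqDataset conventions; paths are illustrative.
#   python save_len_file.py t5-small cnn_dm \
#       --max_source_length 1024 --max_target_length 56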
| 94 | 1 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class lowercase__ ( UpperCamelCase_):
def __init__( self : Optional[int] , UpperCamelCase__ : CLIPSegForImageSegmentation , UpperCamelCase__ : CLIPSegProcessor , UpperCamelCase__ : AutoencoderKL , UpperCamelCase__ : CLIPTextModel , UpperCamelCase__ : CLIPTokenizer , UpperCamelCase__ : UNetaDConditionModel , UpperCamelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase__ : StableDiffusionSafetyChecker , UpperCamelCase__ : CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
SCREAMING_SNAKE_CASE : str = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
            ''' to update the config accordingly as leaving `steps_offset` might lead to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , UpperCamelCase__ , standard_warn=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = dict(scheduler.config )
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Dict = FrozenDict(UpperCamelCase__ )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
SCREAMING_SNAKE_CASE : List[Any] = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , UpperCamelCase__ , standard_warn=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = dict(scheduler.config )
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : List[str] = FrozenDict(UpperCamelCase__ )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=UpperCamelCase__ , segmentation_processor=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , )
def __A ( self : Union[str, Any] , UpperCamelCase__ : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
self.enable_attention_slicing(UpperCamelCase__ )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
SCREAMING_SNAKE_CASE : Tuple = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase__ , UpperCamelCase__ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self : int ):
'''simple docstring'''
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : List[str] , UpperCamelCase__ : Union[str, List[str]] , UpperCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image] , UpperCamelCase__ : str , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 50 , UpperCamelCase__ : float = 7.5 , UpperCamelCase__ : Optional[Union[str, List[str]]] = None , UpperCamelCase__ : Optional[int] = 1 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : Optional[torch.Generator] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase__ : int = 1 , **UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
SCREAMING_SNAKE_CASE : Optional[Any] = self.segmentation_model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
SCREAMING_SNAKE_CASE : str = self.numpy_to_pil(UpperCamelCase__ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
SCREAMING_SNAKE_CASE : str = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , height=UpperCamelCase__ , width=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , output_type=UpperCamelCase__ , return_dict=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=UpperCamelCase__ , )
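# Hedged usage sketch (not part of the pipeline class above). Loading through
# the diffusers "community pipeline" mechanism is an assumption about how this
# file is meant to be consumed; the checkpoint names are illustrative.
#
#   from diffusers import DiffusionPipeline
#   from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
#
#   seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=seg_model,
#       segmentation_processor=seg_processor,
#   )
#   out = pipe(text="a glass", image=init_image, prompt="a cup of coffee").images[0]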
| 182 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
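# Hedged CLI sketch for the translation path (the checkpoint and data paths
# are illustrative; extra flags such as --num_beams flow into model.generate):
#   python run_eval.py Helsinki-NLP/opus-mt-en-ro wmt_en_ro/test.source \
#       translations.txt --reference_path wmt_en_ro/test.target \
#       --score_path bleu.json --task translation --num_beams 5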
| 182 | 1 |
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 264 |
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the converted model is a VQA model.")
    args = parser.parse_args()

    # forward is_vqa as well so the flag actually takes effect
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
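# Hedged CLI sketch (flag names come from the argparse definitions above; the
# script filename and checkpoint paths are illustrative):
#   python convert_pix2struct_original_pytorch_checkpoint_to_hf.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --use_large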
| 264 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Union[str, Any] = 'instructblip_vision_model'
def __init__(self : Union[str, Any] , __UpperCAmelCase : Optional[int]=1_4_0_8 , __UpperCAmelCase : List[Any]=6_1_4_4 , __UpperCAmelCase : Optional[int]=3_9 , __UpperCAmelCase : Optional[Any]=1_6 , __UpperCAmelCase : Optional[Any]=2_2_4 , __UpperCAmelCase : Optional[Any]=1_4 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : Tuple=1E-6 , __UpperCAmelCase : str=0.0 , __UpperCAmelCase : Tuple=1E-10 , __UpperCAmelCase : int=True , **__UpperCAmelCase : Union[str, Any] , ) -> Dict:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = qkv_bias
@classmethod
def lowercase_ (cls : Union[str, Any] , __UpperCAmelCase : Union[str, os.PathLike] , **__UpperCAmelCase : Tuple ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__UpperCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
UpperCAmelCase__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : List[Any] = 'instructblip_qformer'
def __init__(self : str , __UpperCAmelCase : Tuple=3_0_5_2_2 , __UpperCAmelCase : Dict=7_6_8 , __UpperCAmelCase : Any=1_2 , __UpperCAmelCase : List[Any]=1_2 , __UpperCAmelCase : Any=3_0_7_2 , __UpperCAmelCase : List[str]="gelu" , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=5_1_2 , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : List[str]=1E-12 , __UpperCAmelCase : Union[str, Any]=0 , __UpperCAmelCase : Union[str, Any]="absolute" , __UpperCAmelCase : List[Any]=2 , __UpperCAmelCase : Any=1_4_0_8 , **__UpperCAmelCase : List[Any] , ) -> Dict:
"""simple docstring"""
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = position_embedding_type
UpperCAmelCase__ = cross_attention_frequency
UpperCAmelCase__ = encoder_hidden_size
@classmethod
def lowercase_ (cls : Optional[Any] , __UpperCAmelCase : Union[str, os.PathLike] , **__UpperCAmelCase : Dict ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(__UpperCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
UpperCAmelCase__ = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Dict = 'instructblip'
__UpperCAmelCase : Optional[int] = True
def __init__(self : Tuple , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : int=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=3_2 , **__UpperCAmelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
if vision_config is None:
UpperCAmelCase__ = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
UpperCAmelCase__ = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
UpperCAmelCase__ = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
UpperCAmelCase__ = InstructBlipVisionConfig(**__UpperCAmelCase )
UpperCAmelCase__ = InstructBlipQFormerConfig(**__UpperCAmelCase )
UpperCAmelCase__ = text_config["model_type"] if "model_type" in text_config else "opt"
UpperCAmelCase__ = CONFIG_MAPPING[text_model_type](**__UpperCAmelCase )
UpperCAmelCase__ = self.text_config.tie_word_embeddings
UpperCAmelCase__ = self.text_config.is_encoder_decoder
UpperCAmelCase__ = num_query_tokens
UpperCAmelCase__ = self.vision_config.hidden_size
UpperCAmelCase__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase__ = 1.0
UpperCAmelCase__ = 0.02
@classmethod
def lowercase_ (cls : List[Any] , __UpperCAmelCase : InstructBlipVisionConfig , __UpperCAmelCase : InstructBlipQFormerConfig , __UpperCAmelCase : PretrainedConfig , **__UpperCAmelCase : List[Any] , ) -> Tuple:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__UpperCAmelCase , )
def lowercase_ (self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ = self.vision_config.to_dict()
UpperCAmelCase__ = self.qformer_config.to_dict()
UpperCAmelCase__ = self.text_config.to_dict()
UpperCAmelCase__ = self.__class__.model_type
return output
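# Hedged usage sketch (assumes the canonical transformers class names that the
# placeholder classes above correspond to; the sub-config values are defaults):
#   from transformers import OPTConfig
#   config = InstructBlipConfig.from_vision_qformer_text_configs(
#       InstructBlipVisionConfig(), InstructBlipQFormerConfig(), OPTConfig()
#   )
#   config.save_pretrained("./instructblip-config")   # illustrative path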
| 65 |
from __future__ import annotations
class A :
def __init__(self : Union[str, Any] , __UpperCAmelCase : list[list[int]] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = TypeError(
"Matrices must be formed from a list of zero or more lists containing at "
"least one and the same number of values, each of which must be of type "
"int or float." )
if len(__UpperCAmelCase ) != 0:
UpperCAmelCase__ = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(__UpperCAmelCase ) != cols:
raise error
for value in row:
if not isinstance(__UpperCAmelCase , (int, float) ):
raise error
UpperCAmelCase__ = rows
else:
UpperCAmelCase__ = []
def lowercase_ (self : Any ) -> list[list[int]]:
"""simple docstring"""
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def lowercase_ (self : Any ) -> int:
"""simple docstring"""
return len(self.rows )
@property
def lowercase_ (self : Union[str, Any] ) -> int:
"""simple docstring"""
return len(self.rows[0] )
@property
def lowercase_ (self : List[Any] ) -> tuple[int, int]:
"""simple docstring"""
return (self.num_rows, self.num_columns)
@property
def lowercase_ (self : Tuple ) -> bool:
"""simple docstring"""
return self.order[0] == self.order[1]
def lowercase_ (self : Any ) -> Matrix:
"""simple docstring"""
UpperCAmelCase__ = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(__UpperCAmelCase )
def lowercase_ (self : int ) -> int:
"""simple docstring"""
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def lowercase_ (self : Tuple ) -> bool:
"""simple docstring"""
return bool(self.determinant() )
def lowercase_ (self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
"""simple docstring"""
UpperCAmelCase__ = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(__UpperCAmelCase ).determinant()
def lowercase_ (self : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
"""simple docstring"""
if (row + column) % 2 == 0:
return self.get_minor(__UpperCAmelCase , __UpperCAmelCase )
return -1 * self.get_minor(__UpperCAmelCase , __UpperCAmelCase )
def lowercase_ (self : Union[str, Any] ) -> Matrix:
"""simple docstring"""
return Matrix(
[
[self.get_minor(__UpperCAmelCase , __UpperCAmelCase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def lowercase_ (self : List[str] ) -> Matrix:
"""simple docstring"""
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def lowercase_ (self : Optional[Any] ) -> Matrix:
"""simple docstring"""
UpperCAmelCase__ = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__UpperCAmelCase )
def lowercase_ (self : List[Any] ) -> Matrix:
"""simple docstring"""
UpperCAmelCase__ = self.determinant()
if not determinant:
raise TypeError("Only matrices with a non-zero determinant have an inverse" )
return self.adjugate() * (1 / determinant)
def __repr__(self : Dict ) -> str:
"""simple docstring"""
return str(self.rows )
def __str__(self : Optional[Any] ) -> str:
"""simple docstring"""
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(__UpperCAmelCase ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
def lowercase_ (self : Optional[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int | None = None ) -> None:
"""simple docstring"""
UpperCAmelCase__ = TypeError("Row must be a list containing all ints and/or floats" )
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise type_error
for value in row:
if not isinstance(__UpperCAmelCase , (int, float) ):
raise type_error
if len(__UpperCAmelCase ) != self.num_columns:
raise ValueError(
"Row must be equal in length to the other rows in the matrix" )
if position is None:
self.rows.append(__UpperCAmelCase )
else:
UpperCAmelCase__ = self.rows[0:position] + [row] + self.rows[position:]
def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int | None = None ) -> None:
"""simple docstring"""
UpperCAmelCase__ = TypeError(
"Column must be a list containing all ints and/or floats" )
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise type_error
for value in column:
if not isinstance(__UpperCAmelCase , (int, float) ):
raise type_error
if len(__UpperCAmelCase ) != self.num_rows:
raise ValueError(
"Column must be equal in length to the other columns in the matrix" )
if position is None:
UpperCAmelCase__ = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
UpperCAmelCase__ = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__(self : Any , __UpperCAmelCase : object ) -> bool:
"""simple docstring"""
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return NotImplemented
return self.rows == other.rows
def __ne__(self : int , __UpperCAmelCase : object ) -> bool:
"""simple docstring"""
return not self == other
def __neg__(self : Dict ) -> Matrix:
"""simple docstring"""
return self * -1
def __add__(self : Dict , __UpperCAmelCase : Matrix ) -> Matrix:
"""simple docstring"""
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__(self : Optional[Any] , __UpperCAmelCase : Matrix ) -> Matrix:
"""simple docstring"""
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__(self : Tuple , __UpperCAmelCase : Matrix | int | float ) -> Matrix:
"""simple docstring"""
if isinstance(__UpperCAmelCase , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second" )
return Matrix(
[
[Matrix.dot_product(__UpperCAmelCase , __UpperCAmelCase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix" )
    def __pow__(self, other: int) -> Matrix:
        """simple docstring"""
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertible matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result
@classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        """simple docstring"""
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
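# A brief usage sketch (hedged: this assumes the Matrix constructor defined
# earlier in this file accepts a list of rows, as add_row/add_column imply):
#
#     m = Matrix([[1, 2], [3, 4]])
#     print(m ** 2)                              # [[7. 10.]
#                                                #  [15. 22.]]
#     print(Matrix.dot_product([1, 2], [3, 4]))  # 1*3 + 2*4 == 11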
| 65 | 1 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self) -> None:
'''simple docstring'''
super().setUp()
        vocab_tokens = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenization(self) -> None:
        '''simple docstring'''
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)
        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
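# Hedged note on the setUp above: tokenizer tests of this kind write a plain-text
# vocab file in which line i holds token i, so ids are just line numbers. A
# minimal, transformers-independent sketch of that mapping:
#
#     with open(vocab_file, encoding="utf-8") as f:
#         vocab = {token.rstrip("\n"): idx for idx, token in enumerate(f)}
#     [vocab.get(tok, vocab["<unk>"]) for tok in ["我", "是", "C"]]  # -> [8, 9, 10]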
| 366 |
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
'''simple docstring'''
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False) -> None:
        '''simple docstring'''
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        '''simple docstring'''
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
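    # Hedged note: with a projection, the logit above is effectively
    # hidden @ proj @ weight.T + bias, computed as two smaller matmuls:
    #
    #     proj_hid = hidden @ proj        # (..., d_proj) -> (..., d_embed)
    #     logit = proj_hid @ weight.T + bias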
    def forward(self, hidden, labels=None, keep_order=False):
        '''simple docstring'''
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
    def log_prob(self, hidden):
        '''simple docstring'''
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
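# A hedged smoke test of the module above (sizes are arbitrary and chosen only
# to exercise both the head and tail clusters):
#
#     crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500])
#     hidden = torch.randn(4, 9, 32)                  # (batch, seq, d_proj)
#     labels = torch.randint(0, 1000, (4, 9))
#     nll = crit(hidden, labels)                      # per-token negative log-likelihoods
#     logprobs = crit.log_prob(hidden.view(-1, 32))   # (batch*seq, n_token)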
| 136 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        prompt = "A red cartoon frog, 4k"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
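# The slow test above shows the two-stage Kandinsky flow: the prior maps the text
# prompt to CLIP image embeddings, and the img2img pipeline decodes them while
# staying close to the init image (lower strength keeps more of it). A hedged
# minimal recipe:
#
#     image_emb, zero_image_emb = pipe_prior(prompt, generator=generator).to_tuple()
#     image = pipeline(prompt, image=init_image, image_embeds=image_emb,
#                      negative_image_embeds=zero_image_emb, strength=0.3,
#                      output_type="np").images[0]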
| 123 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage, current, power):
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
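# Example (hedged): exactly one of the three quantities must be 0, and it is
# solved from the other two via P = V * I.
#
#     electric_power(voltage=0, current=2, power=5)   # result(name='voltage', value=2.5)
#     electric_power(voltage=2, current=2, power=0)   # result(name='power', value=4.0)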
| 123 | 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
a ="""bart"""
a =True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0")
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
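# Hedged note on the pattern above: precomputed float32 embeddings are memory-
# mapped from disk and served through an exact inner-product index, roughly:
#
#     reps = np.memmap("eli5_questions_reps.dat", dtype="float32", mode="r", shape=(n_rows, 128))
#     index = faiss.IndexFlatIP(128)
#     index.add(np.asarray(reps))                 # copies vectors into the index
#     scores, ids = index.search(query_reps, 10)  # top-10 by inner product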
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__="wiki40b" , lowerCamelCase__="dense" , lowerCamelCase__=1_0 ) -> int:
if source == "none":
__lowerCamelCase : Any = (' <P> '.join(['' for _ in range(1_1 )] ).strip(), [])
else:
if method == "dense":
__lowerCamelCase : List[str] = query_qa_dense_index(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
__lowerCamelCase : Optional[int] = query_es_index(
lowerCamelCase__ , lowerCamelCase__ , index_name='english_wiki40b_snippets_100w' , n_results=lowerCamelCase__ , )
__lowerCamelCase : Optional[int] = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
__lowerCamelCase : Dict = 'question: {} context: {}'.format(lowerCamelCase__ , lowerCamelCase__ )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    })
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, s2s_model, s2s_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0", )[0]
    return (answer, support_list)
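# End-to-end, the Streamlit widgets below compose the pieces above roughly as
# follows (hedged sketch):
#
#     question_doc, support_list = make_support(question, source="wiki40b", method="dense")
#     answer, _ = answer_question(question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256)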
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
a ="""<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
a ="""
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
a ="""
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
a =st.sidebar.checkbox("""Demo options""")
if demo_options:
a =st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
a =action_list.index(action_st)
a =st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
a =show_type == """Show full text of passages"""
else:
a =3
a =True
a =st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
a ="""
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
a =st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
a =st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
a ="""wiki40b"""
a ="""dense"""
a ="""beam"""
a =2
a =64
a =256
a =None
a =None
a =st.sidebar.checkbox("""Generation options""")
if generate_options:
a ="""
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
a =st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
a =st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
a =st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
a =st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
a =st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
a =st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
a =None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("""Show me!"""):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
a ="""https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
a =res[1].strip()
if sec_titles == "":
a ="""[{}]({})""".format(res[0], wiki_url)
else:
a =sec_titles.split(""" & """)
a =""" & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
a ="""
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 366 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32, )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32, )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
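    # Hedged note: the hard-coded ids above encode "J'aime le camembert !" and
    # should be reproducible with the matching tokenizer, e.g.:
    #
    #     from transformers import CamembertTokenizer
    #     tok = CamembertTokenizer.from_pretrained("camembert-base")
    #     tok.encode("J'aime le camembert !")  # -> [5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]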
| 113 | 0 |